feat: wip: begin modularizing microvm definitions to allow for other guest types like containers
oddlama 2023-12-16 22:19:54 +01:00
parent 8d734287e2
commit 76a8f6e247
No known key found for this signature in database
GPG key ID: 14EFE510775FE39A
5 changed files with 263 additions and 227 deletions
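
This commit starts replacing the host-side meta.microvms.vms interface with a backend-agnostic guests option, so that a guest can later be hosted either as a microvm or as a container. Based on the options introduced in the module below, a declaration under the new interface would look roughly like the following sketch; the guest name "myguest" and its module path are placeholders, and the "container" backend is declared in the option type but is still a stub in this commit:

  guests.myguest = {
    backend = "microvm"; # "container" is accepted by the enum but not implemented yet
    microvm.system = "x86_64-linux";
    autostart = true;
    zfs = {
      enable = true;
      pool = "rpool"; # host pool; the dataset defaults to "safe/guests/myguest"
    };
    modules = [ ./guests/myguest.nix ];
  };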

File 1 of 5

@@ -42,6 +42,9 @@
   '';
   networking.extraHosts = "127.0.0.1 modules-cdn.eac-prod.on.epicgames.com";
+  systemd.services."systemd-networkd".environment.SYSTEMD_LOG_LEVEL = "debug";
+  systemd.services."systemd-resolved".environment.SYSTEMD_LOG_LEVEL = "debug";
   graphical.gaming.enable = true;
   stylix.fonts.sizes = {
@@ -50,4 +53,7 @@
     terminal = 20;
     popups = 20;
   };
+  nix.settings.trusted-substituters = ["https://ai.cachix.org"];
+  nix.settings.trusted-public-keys = ["ai.cachix.org-1:N9dzRK+alWwoKXQlnn0H6aUx0lU/mspIoz8hMvGvbbc="];
 }

File 2 of 5

@@ -48,6 +48,39 @@
     ./microvms/common.nix
   ];
+  #guests.adguardhome = {
+  #  backend = "microvm";
+  #  microvm = {
+  #    system = "x86_64-linux";
+  #    autostart = true;
+  #  };
+  #  zfs = {
+  #    enable = true;
+  #    pool = "rpool";
+  #  };
+  #  modules = [ ./guests/adguardhome.nix ];
+  #};
+  guests = let
+    mkMicrovm = system: module: {
+      backend = "microvm";
+      microvm = {
+        system = "x86_64-linux";
+        autostart = true;
+      };
+      zfs = {
+        enable = true;
+        pool = "rpool";
+      };
+      modules = [
+        ../../modules
+        module
+      ];
+    };
+  in {
+    adguardhome = mkMicrovm "x86_64-linux" ./guests/adguardhome.nix;
+  };
   meta.microvms.vms = let
     defaultConfig = name: {
       system = "x86_64-linux";
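
For reference, the mkMicrovm helper above expands the adguardhome entry to roughly the following guest definition; note that in this wip state the helper's system parameter is unused and "x86_64-linux" is hardcoded in its body:

  guests.adguardhome = {
    backend = "microvm";
    microvm = {
      system = "x86_64-linux";
      autostart = true;
    };
    zfs = {
      enable = true;
      pool = "rpool";
    };
    modules = [
      ../../modules
      ./guests/adguardhome.nix
    ];
  };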

File 3 of 5

@@ -127,7 +127,6 @@ in {
   meta.microvms.networking = {
     baseMac = config.repo.secrets.local.networking.interfaces.lan.mac;
     macvtapInterface = "lan";
-    wireguard.openFirewallRules = ["lan-to-local"];
   };
   # Allow accessing influx

File 4 of 5

@@ -1,10 +1,14 @@
-{lib, ...} @ args: {
+{
+  config,
+  lib,
+  ...
+}: {
   # IP addresses: ${"${interface} \e{halfbright}\4{${interface}}\e{reset} \e{halfbright}\6{${interface}}\e{reset}"}
   environment.etc.issue.text = lib.concatStringsSep "\n" ([
       ''\d \t''
       ''This is \e{cyan}\n\e{reset} [\e{lightblue}\l\e{reset}] (\s \m \r)''
     ]
-    # Disabled for microvms because of frequent redraws (-> pushed to syslog on the host)
-    ++ lib.optional (!(args ? parentNodeName)) ''\e{halfbright}\4\e{reset} \e{halfbright}\6\e{reset}''
+    # Disabled for guests because of frequent redraws (-> pushed to syslog on the host)
+    ++ lib.optional (!config.guests.isGuest) ''\e{halfbright}\4\e{reset} \e{halfbright}\6\e{reset}''
     ++ [""]);
 }

File 5 of 5

@@ -10,11 +10,12 @@
   inherit
     (lib)
     attrNames
+    attrValues
+    any
     disko
     escapeShellArg
     makeBinPath
     mapAttrsToList
-    mdDoc
     mergeToplevelConfigs
     mkDefault
     mkEnableOption
@@ -26,36 +27,36 @@
     types
     ;
-  cfg = config.meta.microvms;
+  cfg = config.guests;
   nodeName = config.node.name;
-  inherit (cfg) vms;
-  # Configuration for each microvm
-  microvmConfig = vmName: vmCfg: {
+  inherit (cfg) guests;
+  # Configuration required on the host for a specific guest
+  defineGuest = guestName: guestCfg: {
     # Add the required datasets to the disko configuration of the machine
-    disko.devices.zpool = mkIf vmCfg.zfs.enable {
-      ${vmCfg.zfs.pool}.datasets.${vmCfg.zfs.dataset} =
-        disko.zfs.filesystem vmCfg.zfs.mountpoint;
+    disko.devices.zpool = mkIf guestCfg.zfs.enable {
+      ${guestCfg.zfs.pool}.datasets.${guestCfg.zfs.dataset} =
+        disko.zfs.filesystem guestCfg.zfs.mountpoint;
     };
     # Ensure that the zfs dataset exists before it is mounted.
     systemd.services = let
-      fsMountUnit = "${utils.escapeSystemdPath vmCfg.zfs.mountpoint}.mount";
+      fsMountUnit = "${utils.escapeSystemdPath guestCfg.zfs.mountpoint}.mount";
     in
-      mkIf vmCfg.zfs.enable {
+      mkIf guestCfg.zfs.enable {
         # Ensure that the zfs dataset exists before it is mounted.
-        "zfs-ensure-${utils.escapeSystemdPath vmCfg.zfs.mountpoint}" = {
+        "zfs-ensure-${utils.escapeSystemdPath guestCfg.zfs.mountpoint}" = {
           wantedBy = [fsMountUnit];
           before = [fsMountUnit];
           after = [
-            "zfs-import-${utils.escapeSystemdPath vmCfg.zfs.pool}.service"
+            "zfs-import-${utils.escapeSystemdPath guestCfg.zfs.pool}.service"
             "zfs-mount.target"
           ];
           unitConfig.DefaultDependencies = "no";
           serviceConfig.Type = "oneshot";
           script = let
-            poolDataset = "${vmCfg.zfs.pool}/${vmCfg.zfs.dataset}";
-            diskoDataset = config.disko.devices.zpool.${vmCfg.zfs.pool}.datasets.${vmCfg.zfs.dataset};
+            poolDataset = "${guestCfg.zfs.pool}/${guestCfg.zfs.dataset}";
+            diskoDataset = config.disko.devices.zpool.${guestCfg.zfs.pool}.datasets.${guestCfg.zfs.dataset};
           in ''
             export PATH=${makeBinPath [pkgs.zfs]}":$PATH"
             if ! zfs list -H -o type ${escapeShellArg poolDataset} &>/dev/null ; then
@@ -65,38 +66,39 @@
         };
         # Ensure that the zfs dataset has the correct permissions when mounted
-        "zfs-chown-${utils.escapeSystemdPath vmCfg.zfs.mountpoint}" = {
+        "zfs-chown-${utils.escapeSystemdPath guestCfg.zfs.mountpoint}" = {
           after = [fsMountUnit];
           unitConfig.DefaultDependencies = "no";
           serviceConfig.Type = "oneshot";
           script = ''
-            chmod 700 ${escapeShellArg vmCfg.zfs.mountpoint}
+            chmod 700 ${escapeShellArg guestCfg.zfs.mountpoint}
           '';
         };
-        "microvm@${vmName}" = {
-          requires = [fsMountUnit "zfs-chown-${utils.escapeSystemdPath vmCfg.zfs.mountpoint}.service"];
-          after = [fsMountUnit "zfs-chown-${utils.escapeSystemdPath vmCfg.zfs.mountpoint}.service"];
+        "microvm@${guestName}" = mkIf (guestCfg.backend == "microvm") {
+          requires = [fsMountUnit "zfs-chown-${utils.escapeSystemdPath guestCfg.zfs.mountpoint}.service"];
+          after = [fsMountUnit "zfs-chown-${utils.escapeSystemdPath guestCfg.zfs.mountpoint}.service"];
         };
       };
-    microvm.vms.${vmName} = let
-      mac = (net.mac.assignMacs "02:01:27:00:00:00" 24 [] (attrNames vms)).${vmName};
-    in {
+    microvm.vms.${guestName} = let
+      mac = (net.mac.assignMacs "02:01:27:00:00:00" 24 [] (attrNames guests)).${guestName};
+    in
+      mkIf (guestCfg.backend == "microvm") {
       # Allow children microvms to know which node is their parent
       specialArgs = {
         parentNode = config;
-        parentNodeName = nodeName;
         inherit (inputs.self) nodes;
-        inherit (inputs.self.pkgs.${vmCfg.system}) lib;
+        inherit (inputs.self.pkgs.${guestCfg.microvm.system}) lib;
         inherit inputs;
         inherit minimal;
       };
-      pkgs = inputs.self.pkgs.${vmCfg.system};
-      inherit (vmCfg) autostart;
+      pkgs = inputs.self.pkgs.${guestCfg.microvm.system};
+      inherit (guestCfg) autostart;
       config = {
-        imports = cfg.commonImports ++ vmCfg.modules;
-        node.name = vmCfg.nodeName;
+        imports = guestCfg.modules;
+        node.name = guestCfg.nodeName;
+        node.isGuest = true;
         # TODO needed because of https://github.com/NixOS/nixpkgs/issues/102137
         environment.noXlibs = mkForce false;
@@ -112,7 +114,7 @@
          interfaces = [
            {
              type = "macvtap";
-              id = "vm-${vmName}";
+              id = "vm-${guestName}";
              inherit mac;
              macvtap = {
                link = cfg.networking.macvtapInterface;
@@ -131,15 +133,15 @@
              proto = "virtiofs";
            }
            {
-              source = "/state/vms/${vmName}";
+              source = "/state/guests/${guestName}";
              mountPoint = "/state";
              tag = "state";
              proto = "virtiofs";
            }
          ]
          # Mount persistent data from the host
-          ++ optional vmCfg.zfs.enable {
-            source = vmCfg.zfs.mountpoint;
+          ++ optional guestCfg.zfs.enable {
+            source = guestCfg.zfs.mountpoint;
            mountPoint = "/persist";
            tag = "persist";
            proto = "virtiofs";
@@ -159,10 +161,10 @@
          gc.automatic = mkForce false;
        };
-        networking.renameInterfacesByMac.${vmCfg.networking.mainLinkName} = mac;
+        networking.renameInterfacesByMac.${guestCfg.networking.mainLinkName} = mac;
        systemd.network.networks = {
-          "10-${vmCfg.networking.mainLinkName}" = {
+          "10-${guestCfg.networking.mainLinkName}" = {
            matchConfig.MACAddress = mac;
            DHCP = "yes";
            dhcpV4Config.UseDNS = false;
@@ -178,68 +180,50 @@
          };
        networking.nftables.firewall = {
-          zones.untrusted.interfaces = [vmCfg.networking.mainLinkName];
+          zones.untrusted.interfaces = [guestCfg.networking.mainLinkName];
        };
      };
    };
+    containers.${guestName} =
+      mkIf (guestCfg.backend == "microvm") {
+      };
  };
in {
  imports = [
    # Add the host module, but only enable if it necessary
    inputs.microvm.nixosModules.host
    # This is opt-out, so we can't put this into the mkIf below
-    {microvm.host.enable = vms != {};}
+    {
+      microvm.host.enable =
+        any
+        (guestCfg: guestCfg.backend == "microvm")
+        (attrValues guests);
+    }
  ];
-  options.meta.microvms = {
-    commonImports = mkOption {
-      type = types.listOf types.unspecified;
-      default = [];
-      description = mdDoc "Modules to import on all microvms.";
+  options.node.isGuest = mkOption {
+    type = types.bool;
+    description = "Whether this machine is a guest on another machine.";
+    default = false;
  };
-    networking = {
-      baseMac = mkOption {
-        type = types.net.mac;
-        description = mdDoc ''
-          This MAC address will be used as a base address to derive all MicroVM MAC addresses from.
-          A good practise is to use the physical address of the macvtap interface.
-        '';
-      };
-      macvtapInterface = mkOption {
-        type = types.str;
-        description = mdDoc "The macvtap interface to which MicroVMs should be attached";
-      };
-      wireguard = {
-        cidrv4 = mkOption {
-          type = types.net.cidrv4;
-          description = mdDoc "The ipv4 network address range to use for internal vm traffic.";
-          default = "172.31.0.0/24";
-        };
-        cidrv6 = mkOption {
-          type = types.net.cidrv6;
-          description = mdDoc "The ipv6 network address range to use for internal vm traffic.";
-          default = "fd00:172:31::/120";
-        };
-        port = mkOption {
-          default = 51829;
-          type = types.port;
-          description = mdDoc "The port to listen on.";
-        };
-        openFirewallRules = mkOption {
-          default = [];
-          type = types.listOf types.str;
-          description = mdDoc "The {option}`port` will be opened for all of the given rules in the nftable-firewall.";
-        };
-      };
-    };
-    vms = mkOption {
+  # networking = {
+  #   baseMac = mkOption {
+  #     type = types.net.mac;
+  #     description = ''
+  #       This MAC address will be used as a base address to derive all MicroVM MAC addresses from.
+  #       A good practise is to use the physical address of the macvtap interface.
+  #     '';
+  #   };
+  #
+  #   macvtapInterface = mkOption {
+  #     type = types.str;
+  #     description = "The macvtap interface to which MicroVMs should be attached";
+  #   };
+  # };
+  options.guests = mkOption {
    default = {};
    description = "Defines the actual vms and handles the necessary base setup for them.";
    type = types.attrsOf (types.submodule ({name, ...}: {
@@ -247,62 +231,72 @@ in {
        nodeName = mkOption {
          type = types.str;
          default = "${nodeName}-${name}";
-          description = mdDoc ''
+          description = ''
            The name of the resulting node. By default this will be a compound name
            of the host's name and the vm's name to avoid name clashes. Can be
            overwritten to designate special names to specific vms.
          '';
        };
+        backend = mkOption {
+          type = types.enum ["microvm" "container"];
+          description = ''
+            Determines how the guest will be hosted. You can currently choose
+            between microvm based deployment, or nixos containers.
+          '';
+        };
+        # Options for the microvm backend
+        microvm = {
+          system = mkOption {
+            type = types.str;
+            description = "The system that this microvm should use";
+          };
+        };
        networking = {
          mainLinkName = mkOption {
            type = types.str;
            default = "wan";
-            description = mdDoc "The main ethernet link name inside of the VM";
+            description = "The main ethernet link name inside of the VM";
          };
        };
        zfs = {
-          enable = mkEnableOption (mdDoc "persistent data on separate zfs dataset");
+          enable = mkEnableOption "persistent data on separate zfs dataset";
          pool = mkOption {
            type = types.str;
-            description = mdDoc "The host's zfs pool on which the dataset resides";
+            description = "The host's zfs pool on which the dataset resides";
          };
          dataset = mkOption {
            type = types.str;
-            default = "safe/vms/${name}";
-            description = mdDoc "The host's dataset that should be used for this vm's state (will automatically be created, parent dataset must exist)";
+            default = "safe/guests/${name}";
+            description = "The host's dataset that should be used for this vm's state (will automatically be created, parent dataset must exist)";
          };
          mountpoint = mkOption {
            type = types.str;
-            default = "/vms/${name}";
-            description = mdDoc "The host's mountpoint for the vm's dataset (will be shared via virtiofs as /persist in the vm)";
+            default = "/guests/${name}";
+            description = "The host's mountpoint for the vm's dataset (will be shared via virtiofs as /persist in the vm)";
          };
        };
        autostart = mkOption {
          type = types.bool;
          default = false;
-          description = mdDoc "Whether this VM should be started automatically with the host";
+          description = "Whether this VM should be started automatically with the host";
        };
-        system = mkOption {
-          type = types.str;
-          description = mdDoc "The system that this microvm should use";
-        };
        modules = mkOption {
          type = types.listOf types.unspecified;
          default = [];
-          description = mdDoc "Additional modules to load";
+          description = "Additional modules to load";
        };
      };
    }));
  };
-  };
-  config = mkIf (vms != {}) (mergeToplevelConfigs ["disko" "microvm" "systemd"] (mapAttrsToList microvmConfig vms));
+  config = mkIf (guests != {}) (mergeToplevelConfigs ["disko" "microvm" "systemd"] (mapAttrsToList defineGuest guests));
 }
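
The final config line merges the per-guest host fragments produced by defineGuest into the host configuration, restricted to the disko, microvm and systemd top-level keys. mergeToplevelConfigs comes from this repository's extended lib and is not shown in this diff; a minimal sketch of the behaviour assumed here (merge only the listed top-level attributes from a list of partial configurations) might look like this:

  # Hypothetical sketch, not the repository's actual implementation:
  # pick each requested top-level attribute from every partial config and merge them.
  mergeToplevelConfigs = keys: configs:
    lib.genAttrs keys (key:
      lib.mkMerge (map (cfg: cfg.${key} or {}) configs));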