1
1
Fork 1
mirror of https://github.com/oddlama/nix-config.git synced 2025-10-11 07:10:39 +02:00

feat: wip: begin modularizing microvm definitions to allow for other guest types like containers

This commit is contained in:
This commit is contained in:
oddlama 2023-12-16 22:19:54 +01:00
parent 8d734287e2
commit 76a8f6e247
No known key found for this signature in database
GPG key ID: 14EFE510775FE39A
5 changed files with 263 additions and 227 deletions

View file

@@ -42,6 +42,9 @@
'';
networking.extraHosts = "127.0.0.1 modules-cdn.eac-prod.on.epicgames.com";
systemd.services."systemd-networkd".environment.SYSTEMD_LOG_LEVEL = "debug";
systemd.services."systemd-resolved".environment.SYSTEMD_LOG_LEVEL = "debug";
graphical.gaming.enable = true;
stylix.fonts.sizes = {
@@ -50,4 +53,7 @@
terminal = 20;
popups = 20;
};
nix.settings.trusted-substituters = ["https://ai.cachix.org"];
nix.settings.trusted-public-keys = ["ai.cachix.org-1:N9dzRK+alWwoKXQlnn0H6aUx0lU/mspIoz8hMvGvbbc="];
}

View file

@@ -48,6 +48,39 @@
./microvms/common.nix
];
#guests.adguardhome = {
# backend = "microvm";
# microvm = {
# system = "x86_64-linux";
# autostart = true;
# };
# zfs = {
# enable = true;
# pool = "rpool";
# };
# modules = [ ./guests/adguardhome.nix ];
#};
guests = let
mkMicrovm = system: module: {
backend = "microvm";
microvm = {
system = "x86_64-linux";
autostart = true;
};
zfs = {
enable = true;
pool = "rpool";
};
modules = [
../../modules
module
];
};
in {
adguardhome = mkMicrovm "x86_64-linux" ./guests/adguardhome.nix;
};
meta.microvms.vms = let
defaultConfig = name: {
system = "x86_64-linux";

View file

@@ -127,7 +127,6 @@ in {
meta.microvms.networking = {
baseMac = config.repo.secrets.local.networking.interfaces.lan.mac;
macvtapInterface = "lan";
wireguard.openFirewallRules = ["lan-to-local"];
};
# Allow accessing influx

View file

@@ -1,10 +1,14 @@
{lib, ...} @ args: {
{
config,
lib,
...
}: {
# IP addresses: ${"${interface} \e{halfbright}\4{${interface}}\e{reset} \e{halfbright}\6{${interface}}\e{reset}"}
environment.etc.issue.text = lib.concatStringsSep "\n" ([
''\d \t''
''This is \e{cyan}\n\e{reset} [\e{lightblue}\l\e{reset}] (\s \m \r)''
]
# Disabled for microvms because of frequent redraws (-> pushed to syslog on the host)
++ lib.optional (!(args ? parentNodeName)) ''\e{halfbright}\4\e{reset} \e{halfbright}\6\e{reset}''
# Disabled for guests because of frequent redraws (-> pushed to syslog on the host)
++ lib.optional (!config.guests.isGuest) ''\e{halfbright}\4\e{reset} \e{halfbright}\6\e{reset}''
++ [""]);
}

View file

@@ -10,11 +10,12 @@
inherit
(lib)
attrNames
attrValues
any
disko
escapeShellArg
makeBinPath
mapAttrsToList
mdDoc
mergeToplevelConfigs
mkDefault
mkEnableOption
@@ -26,36 +27,36 @@
types
;
cfg = config.meta.microvms;
cfg = config.guests;
nodeName = config.node.name;
inherit (cfg) vms;
inherit (cfg) guests;
# Configuration for each microvm
microvmConfig = vmName: vmCfg: {
# Configuration required on the host for a specific guest
defineGuest = guestName: guestCfg: {
# Add the required datasets to the disko configuration of the machine
disko.devices.zpool = mkIf vmCfg.zfs.enable {
${vmCfg.zfs.pool}.datasets.${vmCfg.zfs.dataset} =
disko.zfs.filesystem vmCfg.zfs.mountpoint;
disko.devices.zpool = mkIf guestCfg.zfs.enable {
${guestCfg.zfs.pool}.datasets.${guestCfg.zfs.dataset} =
disko.zfs.filesystem guestCfg.zfs.mountpoint;
};
# Ensure that the zfs dataset exists before it is mounted.
systemd.services = let
fsMountUnit = "${utils.escapeSystemdPath vmCfg.zfs.mountpoint}.mount";
fsMountUnit = "${utils.escapeSystemdPath guestCfg.zfs.mountpoint}.mount";
in
mkIf vmCfg.zfs.enable {
mkIf guestCfg.zfs.enable {
# Ensure that the zfs dataset exists before it is mounted.
"zfs-ensure-${utils.escapeSystemdPath vmCfg.zfs.mountpoint}" = {
"zfs-ensure-${utils.escapeSystemdPath guestCfg.zfs.mountpoint}" = {
wantedBy = [fsMountUnit];
before = [fsMountUnit];
after = [
"zfs-import-${utils.escapeSystemdPath vmCfg.zfs.pool}.service"
"zfs-import-${utils.escapeSystemdPath guestCfg.zfs.pool}.service"
"zfs-mount.target"
];
unitConfig.DefaultDependencies = "no";
serviceConfig.Type = "oneshot";
script = let
poolDataset = "${vmCfg.zfs.pool}/${vmCfg.zfs.dataset}";
diskoDataset = config.disko.devices.zpool.${vmCfg.zfs.pool}.datasets.${vmCfg.zfs.dataset};
poolDataset = "${guestCfg.zfs.pool}/${guestCfg.zfs.dataset}";
diskoDataset = config.disko.devices.zpool.${guestCfg.zfs.pool}.datasets.${guestCfg.zfs.dataset};
in ''
export PATH=${makeBinPath [pkgs.zfs]}":$PATH"
if ! zfs list -H -o type ${escapeShellArg poolDataset} &>/dev/null ; then
@@ -65,38 +66,39 @@
};
# Ensure that the zfs dataset has the correct permissions when mounted
"zfs-chown-${utils.escapeSystemdPath vmCfg.zfs.mountpoint}" = {
"zfs-chown-${utils.escapeSystemdPath guestCfg.zfs.mountpoint}" = {
after = [fsMountUnit];
unitConfig.DefaultDependencies = "no";
serviceConfig.Type = "oneshot";
script = ''
chmod 700 ${escapeShellArg vmCfg.zfs.mountpoint}
chmod 700 ${escapeShellArg guestCfg.zfs.mountpoint}
'';
};
"microvm@${vmName}" = {
requires = [fsMountUnit "zfs-chown-${utils.escapeSystemdPath vmCfg.zfs.mountpoint}.service"];
after = [fsMountUnit "zfs-chown-${utils.escapeSystemdPath vmCfg.zfs.mountpoint}.service"];
"microvm@${guestName}" = mkIf (guestCfg.backend == "microvm") {
requires = [fsMountUnit "zfs-chown-${utils.escapeSystemdPath guestCfg.zfs.mountpoint}.service"];
after = [fsMountUnit "zfs-chown-${utils.escapeSystemdPath guestCfg.zfs.mountpoint}.service"];
};
};
microvm.vms.${vmName} = let
mac = (net.mac.assignMacs "02:01:27:00:00:00" 24 [] (attrNames vms)).${vmName};
in {
microvm.vms.${guestName} = let
mac = (net.mac.assignMacs "02:01:27:00:00:00" 24 [] (attrNames guests)).${guestName};
in
mkIf (guestCfg.backend == "microvm") {
# Allow children microvms to know which node is their parent
specialArgs = {
parentNode = config;
parentNodeName = nodeName;
inherit (inputs.self) nodes;
inherit (inputs.self.pkgs.${vmCfg.system}) lib;
inherit (inputs.self.pkgs.${guestCfg.microvm.system}) lib;
inherit inputs;
inherit minimal;
};
pkgs = inputs.self.pkgs.${vmCfg.system};
inherit (vmCfg) autostart;
pkgs = inputs.self.pkgs.${guestCfg.microvm.system};
inherit (guestCfg) autostart;
config = {
imports = cfg.commonImports ++ vmCfg.modules;
node.name = vmCfg.nodeName;
imports = guestCfg.modules;
node.name = guestCfg.nodeName;
node.isGuest = true;
# TODO needed because of https://github.com/NixOS/nixpkgs/issues/102137
environment.noXlibs = mkForce false;
@@ -112,7 +114,7 @@
interfaces = [
{
type = "macvtap";
id = "vm-${vmName}";
id = "vm-${guestName}";
inherit mac;
macvtap = {
link = cfg.networking.macvtapInterface;
@@ -131,15 +133,15 @@
proto = "virtiofs";
}
{
source = "/state/vms/${vmName}";
source = "/state/guests/${guestName}";
mountPoint = "/state";
tag = "state";
proto = "virtiofs";
}
]
# Mount persistent data from the host
++ optional vmCfg.zfs.enable {
source = vmCfg.zfs.mountpoint;
++ optional guestCfg.zfs.enable {
source = guestCfg.zfs.mountpoint;
mountPoint = "/persist";
tag = "persist";
proto = "virtiofs";
@@ -159,10 +161,10 @@
gc.automatic = mkForce false;
};
networking.renameInterfacesByMac.${vmCfg.networking.mainLinkName} = mac;
networking.renameInterfacesByMac.${guestCfg.networking.mainLinkName} = mac;
systemd.network.networks = {
"10-${vmCfg.networking.mainLinkName}" = {
"10-${guestCfg.networking.mainLinkName}" = {
matchConfig.MACAddress = mac;
DHCP = "yes";
dhcpV4Config.UseDNS = false;
@@ -178,68 +180,50 @@
};
networking.nftables.firewall = {
zones.untrusted.interfaces = [vmCfg.networking.mainLinkName];
zones.untrusted.interfaces = [guestCfg.networking.mainLinkName];
};
};
};
containers.${guestName} =
mkIf (guestCfg.backend == "microvm") {
};
};
in {
imports = [
# Add the host module, but only enable if it necessary
inputs.microvm.nixosModules.host
# This is opt-out, so we can't put this into the mkIf below
{microvm.host.enable = vms != {};}
{
microvm.host.enable =
any
(guestCfg: guestCfg.backend == "microvm")
(attrValues guests);
}
];
options.meta.microvms = {
commonImports = mkOption {
type = types.listOf types.unspecified;
default = [];
description = mdDoc "Modules to import on all microvms.";
options.node.isGuest = mkOption {
type = types.bool;
description = "Whether this machine is a guest on another machine.";
default = false;
};
networking = {
baseMac = mkOption {
type = types.net.mac;
description = mdDoc ''
This MAC address will be used as a base address to derive all MicroVM MAC addresses from.
A good practise is to use the physical address of the macvtap interface.
'';
};
# networking = {
# baseMac = mkOption {
# type = types.net.mac;
# description = ''
# This MAC address will be used as a base address to derive all MicroVM MAC addresses from.
# A good practise is to use the physical address of the macvtap interface.
# '';
# };
#
# macvtapInterface = mkOption {
# type = types.str;
# description = "The macvtap interface to which MicroVMs should be attached";
# };
# };
macvtapInterface = mkOption {
type = types.str;
description = mdDoc "The macvtap interface to which MicroVMs should be attached";
};
wireguard = {
cidrv4 = mkOption {
type = types.net.cidrv4;
description = mdDoc "The ipv4 network address range to use for internal vm traffic.";
default = "172.31.0.0/24";
};
cidrv6 = mkOption {
type = types.net.cidrv6;
description = mdDoc "The ipv6 network address range to use for internal vm traffic.";
default = "fd00:172:31::/120";
};
port = mkOption {
default = 51829;
type = types.port;
description = mdDoc "The port to listen on.";
};
openFirewallRules = mkOption {
default = [];
type = types.listOf types.str;
description = mdDoc "The {option}`port` will be opened for all of the given rules in the nftable-firewall.";
};
};
};
vms = mkOption {
options.guests = mkOption {
default = {};
description = "Defines the actual vms and handles the necessary base setup for them.";
type = types.attrsOf (types.submodule ({name, ...}: {
@@ -247,62 +231,72 @@ in {
nodeName = mkOption {
type = types.str;
default = "${nodeName}-${name}";
description = mdDoc ''
description = ''
The name of the resulting node. By default this will be a compound name
of the host's name and the vm's name to avoid name clashes. Can be
overwritten to designate special names to specific vms.
'';
};
backend = mkOption {
type = types.enum ["microvm" "container"];
description = ''
Determines how the guest will be hosted. You can currently choose
between microvm based deployment, or nixos containers.
'';
};
# Options for the microvm backend
microvm = {
system = mkOption {
type = types.str;
description = "The system that this microvm should use";
};
};
networking = {
mainLinkName = mkOption {
type = types.str;
default = "wan";
description = mdDoc "The main ethernet link name inside of the VM";
description = "The main ethernet link name inside of the VM";
};
};
zfs = {
enable = mkEnableOption (mdDoc "persistent data on separate zfs dataset");
enable = mkEnableOption "persistent data on separate zfs dataset";
pool = mkOption {
type = types.str;
description = mdDoc "The host's zfs pool on which the dataset resides";
description = "The host's zfs pool on which the dataset resides";
};
dataset = mkOption {
type = types.str;
default = "safe/vms/${name}";
description = mdDoc "The host's dataset that should be used for this vm's state (will automatically be created, parent dataset must exist)";
default = "safe/guests/${name}";
description = "The host's dataset that should be used for this vm's state (will automatically be created, parent dataset must exist)";
};
mountpoint = mkOption {
type = types.str;
default = "/vms/${name}";
description = mdDoc "The host's mountpoint for the vm's dataset (will be shared via virtiofs as /persist in the vm)";
default = "/guests/${name}";
description = "The host's mountpoint for the vm's dataset (will be shared via virtiofs as /persist in the vm)";
};
};
autostart = mkOption {
type = types.bool;
default = false;
description = mdDoc "Whether this VM should be started automatically with the host";
};
system = mkOption {
type = types.str;
description = mdDoc "The system that this microvm should use";
description = "Whether this VM should be started automatically with the host";
};
modules = mkOption {
type = types.listOf types.unspecified;
default = [];
description = mdDoc "Additional modules to load";
description = "Additional modules to load";
};
};
}));
};
};
config = mkIf (vms != {}) (mergeToplevelConfigs ["disko" "microvm" "systemd"] (mapAttrsToList microvmConfig vms));
config = mkIf (guests != {}) (mergeToplevelConfigs ["disko" "microvm" "systemd"] (mapAttrsToList defineGuest guests));
}