1
1
Fork 1
mirror of https://github.com/oddlama/nix-config.git synced 2025-10-10 23:00:39 +02:00

wip: remove very specific special args and unify library functions

This commit is contained in:
oddlama 2023-06-30 01:55:17 +02:00
parent dfc3084fe9
commit 68bb9731d3
No known key found for this signature in database
GPG key ID: 14EFE510775FE39A
28 changed files with 594 additions and 644 deletions

View file

@ -68,18 +68,15 @@
outputs = {
self,
colmena,
nixos-generators,
nixpkgs,
microvm,
flake-utils,
agenix-rekey,
...
} @ inputs: let
recursiveMergeAttrs = nixpkgs.lib.foldl' nixpkgs.lib.recursiveUpdate {};
inherit (nixpkgs) lib;
in
{
extraLib = import ./nix/lib.nix inputs;
# The identities that are used to rekey agenix secrets and to
# decrypt all repository-wide secrets.
secretsConfig = {
@ -87,8 +84,8 @@
extraEncryptionPubkeys = [./secrets/backup.pub];
};
stateVersion = "23.05";
# This is the list of hosts that this flake defines, plus the minimum
# amount of metadata that is necessary to instantiate it correctly.
hosts = let
nixos = system: {
type = "nixos";
@ -101,21 +98,30 @@
zackbiene = nixos "aarch64-linux";
};
# This will process all defined hosts of type "nixos" and
# generate the required colmena definition for each host.
# We call the resulting instantiations "nodes".
# TODO: switch to nixosConfigurations once colmena supports it upstream
colmena = import ./nix/colmena.nix inputs;
colmenaNodes = ((colmena.lib.makeHive self.colmena).introspect (x: x)).nodes;
# Collect all defined microvm nodes from each colmena node
microvmNodes = nixpkgs.lib.concatMapAttrs (_: node:
nixpkgs.lib.mapAttrs'
(vm: def: nixpkgs.lib.nameValuePair def.nodeName node.config.microvm.vms.${vm}.config)
(node.config.meta.microvms.vms or {}))
self.colmenaNodes;
# Expose all nodes in a single attribute
# True NixOS nodes can define additional microvms (guest nodes) that are built
# together with the true host. We collect all defined microvm nodes
# from each node here to allow accessing any node via the unified attribute `nodes`.
microvmNodes = lib.flip lib.concatMapAttrs self.colmenaNodes (_: node:
lib.mapAttrs'
(vm: def: lib.nameValuePair def.nodeName node.config.microvm.vms.${vm}.config)
(node.config.meta.microvms.vms or {}));
# All nixosSystem instantiations are collected here, so that we can refer
# to any system via nodes.<name>
nodes = self.colmenaNodes // self.microvmNodes;
# Collect installer packages
# For each true NixOS system, we want to expose an installer image that
# can be used to do setup on the node.
inherit
(recursiveMergeAttrs
(nixpkgs.lib.mapAttrsToList
(lib.foldl' lib.recursiveUpdate {}
(lib.mapAttrsToList
(import ./nix/generate-installer.nix inputs)
self.colmenaNodes))
packages

View file

@ -1,17 +1,19 @@
{
nixos-hardware,
inputs,
pkgs,
...
}: {
imports = [
nixos-hardware.common-cpu-intel
nixos-hardware.common-gpu-intel
nixos-hardware.common-pc-laptop
nixos-hardware.common-pc-laptop-ssd
inputs.nixos-hardware.nixosModules.common-cpu-intel
inputs.nixos-hardware.nixosModules.common-gpu-intel
inputs.nixos-hardware.nixosModules.common-pc-laptop
inputs.nixos-hardware.nixosModules.common-pc-laptop-ssd
../../modules/optional/hardware/intel.nix
../../modules/optional/hardware/physical.nix
../../modules
#../../modules
../../modules/config/lib.nix
../../modules/optional/boot-efi.nix
../../modules/optional/initrd-ssh.nix
../../modules/optional/dev

View file

@ -1,7 +1,6 @@
{
config,
lib,
extraLib,
pkgs,
...
}: {
@ -10,7 +9,7 @@
m2-ssd = {
type = "disk";
device = "/dev/disk/by-id/${config.repo.secrets.local.disk.m2-ssd}";
content = with extraLib.disko.gpt; {
content = with config.lib.disko.gpt; {
type = "table";
format = "gpt";
partitions = [
@ -21,7 +20,7 @@
boot-ssd = {
type = "disk";
device = "/dev/disk/by-id/${config.repo.secrets.local.disk.boot-ssd}";
content = with extraLib.disko.gpt; {
content = with config.lib.disko.gpt; {
type = "table";
format = "gpt";
partitions = [
@ -31,25 +30,10 @@
};
};
};
zpool = with extraLib.disko.zfs; {
zpool = with config.lib.disko.zfs; {
rpool = defaultZpoolOptions // {datasets = defaultZfsDatasets;};
};
};
# TODO remove once this is upstreamed
boot.initrd.systemd.services."zfs-import-rpool".after = ["cryptsetup.target"];
fileSystems."/state".neededForBoot = true;
fileSystems."/persist".neededForBoot = true;
# After importing the rpool, rollback the root system to be empty.
boot.initrd.systemd.services.impermanence-root = {
wantedBy = ["initrd.target"];
after = ["zfs-import-rpool.service"];
before = ["sysroot.mount"];
unitConfig.DefaultDependencies = "no";
serviceConfig = {
Type = "oneshot";
ExecStart = "${pkgs.zfs}/bin/zfs rollback -r rpool/local/root@blank";
};
};
boot.initrd.luks.devices.enc-rpool.allowDiscards = true;
}

View file

@ -1,6 +1,5 @@
{
config,
extraLib,
pkgs,
...
}: {
@ -9,7 +8,7 @@
main = {
type = "disk";
device = "/dev/disk/by-id/${config.repo.secrets.local.disk.main}";
content = with extraLib.disko.gpt; {
content = with config.lib.disko.gpt; {
type = "table";
format = "gpt";
partitions = [
@ -20,27 +19,11 @@
};
};
};
zpool = with extraLib.disko.zfs; {
zpool = with config.lib.disko.zfs; {
rpool = defaultZpoolOptions // {datasets = defaultZfsDatasets;};
};
};
boot.loader.grub.devices = ["/dev/disk/by-id/${config.repo.secrets.local.disk.main}"];
boot.initrd.luks.devices.enc-rpool.allowDiscards = true;
# TODO remove once this is upstreamed
boot.initrd.systemd.services."zfs-import-rpool".after = ["cryptsetup.target"];
fileSystems."/state".neededForBoot = true;
fileSystems."/persist".neededForBoot = true;
# After importing the rpool, rollback the root system to be empty.
boot.initrd.systemd.services.impermanence-root = {
wantedBy = ["initrd.target"];
after = ["zfs-import-rpool.service"];
before = ["sysroot.mount"];
unitConfig.DefaultDependencies = "no";
serviceConfig = {
Type = "oneshot";
ExecStart = "${pkgs.zfs}/bin/zfs rollback -r rpool/local/root@blank";
};
};
}

View file

@ -1,12 +1,12 @@
{
inputs,
config,
nixos-hardware,
nodes,
...
}: {
imports = [
nixos-hardware.common-cpu-intel
nixos-hardware.common-pc-ssd
inputs.nixos-hardware.nixosModules.common-cpu-intel
inputs.nixos-hardware.nixosModules.common-pc-ssd
../../modules/optional/hardware/intel.nix
../../modules/optional/hardware/physical.nix
@ -50,6 +50,13 @@
enable = true;
pool = "rpool";
};
todo
configPath =
if nodePath != null && builtins.pathExists (nodePath + "/microvms/${name}") then
nodePath + "/microvms/${name}"
else if nodePath != null && builtins.pathExists (nodePath + "/microvms/${name}") then
nodePath + "/microvms/${name}.nix"
else null;
};
in {
kanidm = defaults;

View file

@ -1,7 +1,6 @@
{
config,
lib,
extraLib,
pkgs,
...
}: {
@ -10,7 +9,7 @@
m2-ssd = {
type = "disk";
device = "/dev/disk/by-id/${config.repo.secrets.local.disk.m2-ssd}";
content = with extraLib.disko.gpt; {
content = with config.lib.disko.gpt; {
type = "table";
format = "gpt";
partitions = [
@ -21,7 +20,7 @@
};
};
};
zpool = with extraLib.disko.zfs; {
zpool = with config.lib.disko.zfs; {
rpool =
defaultZpoolOptions
// {
@ -34,20 +33,5 @@
};
};
# TODO remove once this is upstreamed
boot.initrd.systemd.services."zfs-import-rpool".after = ["cryptsetup.target"];
fileSystems."/state".neededForBoot = true;
fileSystems."/persist".neededForBoot = true;
# After importing the rpool, rollback the root system to be empty.
boot.initrd.systemd.services.impermanence-root = {
wantedBy = ["initrd.target"];
after = ["zfs-import-rpool.service"];
before = ["sysroot.mount"];
unitConfig.DefaultDependencies = "no";
serviceConfig = {
Type = "oneshot";
ExecStart = "${pkgs.zfs}/bin/zfs rollback -r rpool/local/root@blank";
};
};
boot.initrd.luks.devices.enc-rpool.allowDiscards = true;
}

View file

@ -1,7 +1,6 @@
{
config,
lib,
nodeName,
nodes,
utils,
...
@ -123,7 +122,7 @@ in {
url = "https://${sentinelCfg.networking.providedDomains.loki}";
orgId = 1;
basicAuth = true;
basicAuthUser = "${nodeName}+grafana-loki-basic-auth-password";
basicAuthUser = "${config.repo.node.name}+grafana-loki-basic-auth-password";
secureJsonData.basicAuthPassword = "$__file{${config.age.secrets.grafana-loki-basic-auth-password.path}}";
}
];

View file

@ -1,10 +1,4 @@
{
lib,
config,
nixos-hardware,
pkgs,
...
}: {
{lib, ...}: {
imports = [
../../modules/optional/hardware/odroid-n2plus.nix

View file

@ -1,4 +1,5 @@
{
# TODO disko
fileSystems = {
"/" = {
device = "rpool/root/nixos";

View file

@ -8,6 +8,7 @@
# State that should be kept across reboots, but is otherwise
# NOT important information in any way that needs to be backed up.
fileSystems."/state".neededForBoot = true;
environment.persistence."/state" = {
hideMounts = true;
directories =
@ -44,6 +45,7 @@
};
# State that should be kept forever, and backed up accordingly.
fileSystems."/persist".neededForBoot = true;
environment.persistence."/persist" = {
hideMounts = true;
files = [

View file

@ -1,16 +1,244 @@
{
extraLib,
inputs,
lib,
...
}: {
}: let
inherit
(lib)
all
any
assertMsg
attrNames
attrValues
concatLists
concatMap
concatMapStrings
concatStringsSep
elem
escapeShellArg
filter
flatten
flip
foldAttrs
foldl'
genAttrs
genList
hasInfix
head
isAttrs
mapAttrs'
mergeAttrs
min
mkMerge
mkOptionType
nameValuePair
optionalAttrs
partition
range
recursiveUpdate
removeSuffix
reverseList
showOption
splitString
stringToCharacters
substring
types
unique
warnIf
;
in {
# IP address math library
# https://gist.github.com/duairc/5c9bb3c922e5d501a1edb9e7b3b845ba
# Plus some extensions by us
lib = let
libWithNet = (import "${inputs.lib-net}/net.nix" {inherit lib;}).lib;
in
lib.recursiveUpdate libWithNet {
recursiveUpdate libWithNet {
types = rec {
# Checks whether the value is a lazy value without causing
# its value to be evaluated
isLazyValue = x: isAttrs x && x ? _lazyValue;
# Constructs a lazy value holding the given value.
lazyValue = value: {_lazyValue = value;};
# Represents a lazy value of the given type, which
# holds the actual value as an attrset like { _lazyValue = <actual value>; }.
# This allows the option to be defined and filtered from a definition
# list without evaluating the value.
lazyValueOf = type:
mkOptionType rec {
name = "lazyValueOf ${type.name}";
inherit (type) description descriptionClass emptyValue getSubOptions getSubModules;
check = isLazyValue;
merge = loc: defs:
assert assertMsg
(all (x: type.check x._lazyValue) defs)
"The option `${showOption loc}` is defined with a lazy value holding an invalid type";
types.mergeOneOption loc defs;
substSubModules = m: types.uniq (type.substSubModules m);
functor = (types.defaultFunctor name) // {wrapped = type;};
nestedTypes.elemType = type;
};
# Represents a value or lazy value of the given type that will
# automatically be coerced to the given type when merged.
lazyOf = type: types.coercedTo (lazyValueOf type) (x: x._lazyValue) type;
};
misc = rec {
# Counts how often each element occurs in xs
countOccurrences = let
addOrUpdate = acc: x:
acc // {${x} = (acc.${x} or 0) + 1;};
in
foldl' addOrUpdate {};
# Returns all elements in xs that occur at least twice
duplicates = xs: let
occurrences = countOccurrences xs;
in
unique (filter (x: occurrences.${x} > 1) xs);
# Concatenates all given attrsets as if calling a // b in order.
concatAttrs = foldl' mergeAttrs {};
# True if the path or string starts with /
isAbsolutePath = x: substring 0 1 x == "/";
# Merges all given attributes from the given attrsets using mkMerge.
# Useful to merge several top-level configs in a module.
mergeToplevelConfigs = keys: attrs:
genAttrs keys (attr: mkMerge (map (x: x.${attr} or {}) attrs));
# Calculates base^exp, but careful, this overflows for results > 2^62
pow = base: exp: foldl' (a: x: x * a) 1 (genList (_: base) exp);
# Converts the given hex string to an integer. Only reliable for inputs in [0, 2^63),
# after that the sign bit will overflow.
hexToDec = v: let
literalValues = {
"0" = 0;
"1" = 1;
"2" = 2;
"3" = 3;
"4" = 4;
"5" = 5;
"6" = 6;
"7" = 7;
"8" = 8;
"9" = 9;
"a" = 10;
"b" = 11;
"c" = 12;
"d" = 13;
"e" = 14;
"f" = 15;
"A" = 10;
"B" = 11;
"C" = 12;
"D" = 13;
"E" = 14;
"F" = 15;
};
in
foldl' (acc: x: acc * 16 + literalValues.${x}) 0 (stringToCharacters v);
};
disko = {
gpt = {
partGrub = name: start: end: {
inherit name start end;
part-type = "primary";
flags = ["bios_grub"];
};
partEfi = name: start: end: {
inherit name start end;
fs-type = "fat32";
bootable = true;
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot";
};
};
partSwap = name: start: end: {
inherit name start end;
fs-type = "linux-swap";
content = {
type = "swap";
randomEncryption = true;
};
};
partLuksZfs = name: start: end: {
inherit start end;
name = "enc-${name}";
content = {
type = "luks";
name = "enc-${name}";
extraOpenArgs = ["--allow-discards"];
content = {
type = "zfs";
pool = name;
};
};
};
};
zfs = rec {
defaultZpoolOptions = {
type = "zpool";
mountRoot = "/mnt";
rootFsOptions = {
compression = "zstd";
acltype = "posix";
atime = "off";
xattr = "sa";
dnodesize = "auto";
mountpoint = "none";
canmount = "off";
devices = "off";
};
options.ashift = "12";
};
defaultZfsDatasets = {
"local" = unmountable;
"local/root" =
filesystem "/"
// {
postCreateHook = "zfs snapshot rpool/local/root@blank";
};
"local/nix" = filesystem "/nix";
"local/state" = filesystem "/state";
"safe" = unmountable;
"safe/persist" = filesystem "/persist";
};
unmountable = {type = "zfs_fs";};
filesystem = mountpoint: {
type = "zfs_fs";
options = {
canmount = "noauto";
inherit mountpoint;
};
# Required to add dependencies for initrd
inherit mountpoint;
};
};
};
secrets = let
rageMasterIdentityArgs = concatMapStrings (x: "-i ${escapeShellArg x} ") inputs.self.secretsConfig.masterIdentities;
rageExtraEncryptionPubkeys =
concatMapStrings (
x:
if misc.isAbsolutePath x
then "-R ${escapeShellArg x} "
else "-r ${escapeShellArg x} "
)
inputs.self.secretsConfig.extraEncryptionPubkeys;
in {
# TODO replace these by lib.agenix-rekey
# The arguments required to de-/encrypt a secret in this repository
rageDecryptArgs = "${rageMasterIdentityArgs}";
rageEncryptArgs = "${rageMasterIdentityArgs} ${rageExtraEncryptionPubkeys}";
};
net = {
cidr = rec {
# host :: (ip | mac | integer) -> cidr -> ip
@ -33,7 +261,7 @@
host = i: n: let
cap = libWithNet.net.cidr.capacity n;
in
assert lib.assertMsg (i >= (-cap) && i < cap) "The host ${toString i} lies outside of ${n}";
assert assertMsg (i >= (-cap) && i < cap) "The host ${toString i} lies outside of ${n}";
libWithNet.net.cidr.host i n;
# hostCidr :: (ip | mac | integer) -> cidr -> cidr
#
@ -55,7 +283,7 @@
# "192.168.1.100"
# > net.cidr.ip "192.168.1.100"
# "192.168.1.100"
ip = x: lib.head (lib.splitString "/" x);
ip = x: head (splitString "/" x);
# canonicalize :: cidr -> cidr
#
# Replaces the ip of the cidr with the canonical network address
@ -78,32 +306,31 @@
mergev4 = addrs_: let
# Append /32 if necessary
addrs = map (x:
if lib.hasInfix "/" x
if hasInfix "/" x
then x
else "${x}/32")
addrs_;
# The smallest occurring length is the first we need to start checking, since
# any greater cidr length represents a smaller address range which
# wouldn't contain all of the original addresses.
startLength = lib.foldl' lib.min 32 (map libWithNet.net.cidr.length addrs);
possibleLengths = lib.reverseList (lib.range 0 startLength);
startLength = foldl' min 32 (map libWithNet.net.cidr.length addrs);
possibleLengths = reverseList (range 0 startLength);
# The first ip address will be "expanded" in cidr length until it covers all other
# used addresses.
firstIp = ip (lib.head addrs);
firstIp = ip (head addrs);
# Return the first (i.e. greatest length -> smallest prefix) cidr length
# in the list that covers all used addresses
bestLength = lib.head (lib.filter
bestLength = head (filter
# All given addresses must be contained by the generated address.
(len:
lib.all
(x:
all (x:
libWithNet.net.cidr.contains
(ip x)
(libWithNet.net.cidr.make len firstIp))
addrs)
possibleLengths);
in
assert lib.assertMsg (!lib.any (lib.hasInfix ":") addrs) "mergev4 cannot operate on ipv6 addresses";
assert assertMsg (!any (hasInfix ":") addrs) "mergev4 cannot operate on ipv6 addresses";
if addrs == []
then null
else libWithNet.net.cidr.make bestLength firstIp;
@ -119,32 +346,31 @@
mergev6 = addrs_: let
# Append /128 if necessary
addrs = map (x:
if lib.hasInfix "/" x
if hasInfix "/" x
then x
else "${x}/128")
addrs_;
# The smallest occurring length is the first we need to start checking, since
# any greater cidr length represents a smaller address range which
# wouldn't contain all of the original addresses.
startLength = lib.foldl' lib.min 128 (map libWithNet.net.cidr.length addrs);
possibleLengths = lib.reverseList (lib.range 0 startLength);
startLength = foldl' min 128 (map libWithNet.net.cidr.length addrs);
possibleLengths = reverseList (range 0 startLength);
# The first ip address will be "expanded" in cidr length until it covers all other
# used addresses.
firstIp = ip (lib.head addrs);
firstIp = ip (head addrs);
# Return the first (i.e. greatest length -> smallest prefix) cidr length
# in the list that covers all used addresses
bestLength = lib.head (lib.filter
bestLength = head (filter
# All given addresses must be contained by the generated address.
(len:
lib.all
(x:
all (x:
libWithNet.net.cidr.contains
(ip x)
(libWithNet.net.cidr.make len firstIp))
addrs)
possibleLengths);
in
assert lib.assertMsg (lib.all (lib.hasInfix ":") addrs) "mergev6 cannot operate on ipv4 addresses";
assert assertMsg (all (hasInfix ":") addrs) "mergev6 cannot operate on ipv4 addresses";
if addrs == []
then null
else libWithNet.net.cidr.make bestLength firstIp;
@ -154,7 +380,7 @@
# but yields two separate result for all given ipv4 and ipv6 addresses.
# Equivalent to calling mergev4 and mergev6 on a partition individually.
merge = addrs: let
v4_and_v6 = lib.partition (lib.hasInfix ":") addrs;
v4_and_v6 = partition (hasInfix ":") addrs;
in {
cidrv4 = mergev4 v4_and_v6.wrong;
cidrv6 = mergev6 v4_and_v6.right;
@ -186,9 +412,9 @@
# The network and broadcast address should never be used, and we
# want to reserve the host address for the host. We also convert
# any ips to offsets here.
init = lib.unique (
init = unique (
[0 (capacity - 1)]
++ lib.flip map reserved (x:
++ flip map reserved (x:
if builtins.typeOf x == "int"
then x
else -(libWithNet.net.ip.diff baseAddr x))
@ -197,17 +423,17 @@
nInit = builtins.length init;
# Pre-sort all hosts, to ensure ordering invariance
sortedHosts =
lib.warnIf
warnIf
((nInit + nHosts) > 0.3 * capacity)
"assignIps: hash stability may be degraded since utilization is >30%"
(builtins.sort builtins.lessThan hosts);
# Generates a hash (i.e. offset value) for a given hostname
hashElem = x:
builtins.bitAnd (capacity - 1)
(extraLib.hexToDec (builtins.substring 0 16 (builtins.hashString "sha256" x)));
(misc.hexToDec (builtins.substring 0 16 (builtins.hashString "sha256" x)));
# Do linear probing. Returns the first unused value at or after the given value.
probe = avoid: value:
if lib.elem value avoid
if elem value avoid
# TODO lib.mod
# Poor man's modulo, because nix has no modulo. Luckily we operate on a residue
# class of x modulo 2^n, so we can use bitAnd instead.
@ -228,12 +454,12 @@
used = [value] ++ used;
};
in
assert lib.assertMsg (cidrSize >= 2 && cidrSize <= 62)
assert assertMsg (cidrSize >= 2 && cidrSize <= 62)
"assignIps: cidrSize=${toString cidrSize} is not in [2, 62].";
assert lib.assertMsg (nHosts <= capacity - nInit)
assert assertMsg (nHosts <= capacity - nInit)
"assignIps: number of hosts (${toString nHosts}) must be <= capacity (${toString capacity}) - reserved (${toString nInit})";
# Assign an ip in the subnet to each element, in order
(lib.foldl' assignOne {
(foldl' assignOne {
assigned = {};
used = init;
}
@ -244,15 +470,15 @@
# Checks whether the given address (with or without cidr notation) is an ipv4 address.
isv4 = x: !isv6 x;
# Checks whether the given address (with or without cidr notation) is an ipv6 address.
isv6 = lib.hasInfix ":";
isv6 = hasInfix ":";
};
mac = {
# Adds offset to the given base address and ensures the result is in
# a locally administered range by replacing the second nibble with a 2.
addPrivate = base: offset: let
added = libWithNet.net.mac.add base offset;
pre = lib.substring 0 1 added;
suf = lib.substring 2 (-1) added;
pre = substring 0 1 added;
suf = substring 2 (-1) added;
in "${pre}2${suf}";
# assignMacs :: mac (base) -> int (size) -> [int | mac] (reserved) -> [string] (hosts) -> [mac]
#
@ -272,10 +498,10 @@
# > net.mac.assignMacs "11:22:33:00:00:00" 24 ["11:22:33:1b:bd:ca"] ["a" "b" "c"]
# { a = "11:22:33:1b:bd:cb"; b = "11:22:33:39:59:4a"; c = "11:22:33:50:7a:e2"; }
assignMacs = base: size: reserved: hosts: let
capacity = extraLib.pow 2 size;
capacity = misc.pow 2 size;
baseAsInt = libWithNet.net.mac.diff base "00:00:00:00:00:00";
init = lib.unique (
lib.flip map reserved (x:
init = unique (
flip map reserved (x:
if builtins.typeOf x == "int"
then x
else libWithNet.net.mac.diff x base)
@ -284,17 +510,17 @@
nInit = builtins.length init;
# Pre-sort all hosts, to ensure ordering invariance
sortedHosts =
lib.warnIf
warnIf
((nInit + nHosts) > 0.3 * capacity)
"assignMacs: hash stability may be degraded since utilization is >30%"
(builtins.sort builtins.lessThan hosts);
# Generates a hash (i.e. offset value) for a given hostname
hashElem = x:
builtins.bitAnd (capacity - 1)
(extraLib.hexToDec (builtins.substring 0 16 (builtins.hashString "sha256" x)));
(misc.hexToDec (builtins.substring 0 16 (builtins.hashString "sha256" x)));
# Do linear probing. Returns the first unused value at or after the given value.
probe = avoid: value:
if lib.elem value avoid
if elem value avoid
# TODO lib.mod
# Poor man's modulo, because nix has no modulo. Luckily we operate on a residue
# class of x modulo 2^n, so we can use bitAnd instead.
@ -315,14 +541,14 @@
used = [value] ++ used;
};
in
assert lib.assertMsg (size >= 2 && size <= 62)
assert assertMsg (size >= 2 && size <= 62)
"assignMacs: size=${toString size} is not in [2, 62].";
assert lib.assertMsg (builtins.bitAnd (capacity - 1) baseAsInt == 0)
assert assertMsg (builtins.bitAnd (capacity - 1) baseAsInt == 0)
"assignMacs: the size=${toString size} least significant bits of the base mac address must be 0.";
assert lib.assertMsg (nHosts <= capacity - nInit)
assert assertMsg (nHosts <= capacity - nInit)
"assignMacs: number of hosts (${toString nHosts}) must be <= capacity (${toString capacity}) - reserved (${toString nInit})";
# Assign an ip in the subnet to each element, in order
(lib.foldl' assignOne {
(foldl' assignOne {
assigned = {};
used = init;
}

View file

@ -1,13 +1,11 @@
{
config,
lib,
nodeName,
...
}: {
systemd.network.enable = true;
networking = {
hostName = nodeName;
useDHCP = lib.mkForce false;
useNetworkd = true;
dhcpcd.enable = false;

View file

@ -1,7 +1,6 @@
{
inputs,
pkgs,
stateVersion,
...
}: {
environment.etc."nixos/configuration.nix".source = pkgs.writeText "configuration.nix" ''
@ -53,6 +52,6 @@
extraSystemBuilderCmds = ''
ln -sv ${pkgs.path} $out/nixpkgs
'';
inherit stateVersion;
stateVersion = "23.11";
};
}

View file

@ -1,17 +1,17 @@
{
config,
inputs,
lib,
nodePath,
...
}: {
# Define local repo secrets
repo.secretFiles = let
local = nodePath + "/secrets/local.nix.age";
local = config.node.secretsDir + "/local.nix.age";
in
{
global = ../../secrets/global.nix.age;
}
// lib.optionalAttrs (nodePath != null && lib.pathExists local) {inherit local;};
// lib.optionalAttrs (lib.pathExists local) {inherit local;};
# Setup secret rekeying parameters
age.rekey = {
@ -24,13 +24,7 @@
# This is technically impure, but intended. We need to rekey on the
# current system due to yubikey availability.
forceRekeyOnSystem = builtins.extraBuiltins.unsafeCurrentSystem;
hostPubkey = let
pubkeyPath =
if nodePath == null
then null
else nodePath + "/secrets/host.pub";
in
lib.mkIf (pubkeyPath != null && lib.pathExists pubkeyPath) pubkeyPath;
hostPubkey = config.node.secretsDir + "/host.pub";
};
age.generators.dhparams.script = {pkgs, ...}: "${pkgs.openssl}/bin/openssl dhparam 4096";

View file

@ -1,11 +1,7 @@
{
config,
extraLib,
inputs,
lib,
microvm,
nodeName,
nodePath,
pkgs,
utils,
...
@ -36,7 +32,8 @@
parentConfig = config;
cfg = config.meta.microvms;
inherit (config.meta.microvms) vms;
nodeName = config.repo.node.name;
inherit (cfg) vms;
inherit (config.lib) net;
# Configuration for each microvm
@ -44,7 +41,7 @@
# Add the required datasets to the disko configuration of the machine
disko.devices.zpool = mkIf vmCfg.zfs.enable {
${vmCfg.zfs.pool}.datasets."${vmCfg.zfs.dataset}" =
extraLib.disko.zfs.filesystem vmCfg.zfs.mountpoint;
config.lib.disko.zfs.filesystem vmCfg.zfs.mountpoint;
};
# Ensure that the zfs dataset exists before it is mounted.
@ -94,8 +91,9 @@
nodes = mkMerge config.microvm.vms.${vmName}.config.options.nodes.definitions;
microvm.vms.${vmName} = let
node = import ../../nix/generate-node.nix inputs vmCfg.nodeName {
inherit (vmCfg) system configPath;
node = import ../../nix/generate-node.nix inputs {
name = vmCfg.nodeName;
inherit (vmCfg) system;
};
mac = (net.mac.assignMacs "02:01:27:00:00:00" 24 [] (attrNames vms)).${vmName};
in {
@ -217,7 +215,7 @@
in {
imports = [
# Add the host module, but only enable if it necessary
microvm.host
inputs.microvm.nixosModules.host
# This is opt-out, so we can't put this into the mkIf below
{microvm.host.enable = vms != {};}
];
@ -289,26 +287,6 @@ in {
'';
};
configPath = mkOption {
type = types.nullOr types.path;
default =
if nodePath != null && builtins.pathExists (nodePath + "/microvms/${name}")
then nodePath + "/microvms/${name}"
else null;
description = mdDoc ''
The main configuration directory for this microvm. If not-null, the given
directory will automatically be imported as system configuration. It will
become the nodePath for the microvm meaning that some machine-specific files
may be referenced there automatically (for example host.pub).
This can also be set to a file, which will then simply be used as the main
import for configuration, without setting a nodePath.
By default this will be set to the current node's <nodePath>/microvms/<vmname>
if the current nodePath is non-null and the directory exists.
'';
};
networking = {
mainLinkName = mkOption {
type = types.str;
@ -378,6 +356,6 @@ in {
};
};
}
// extraLib.mergeToplevelConfigs ["nodes" "disko" "microvm" "systemd"] (mapAttrsToList microvmConfig vms)
// config.lib.misc.mergeToplevelConfigs ["nodes" "disko" "microvm" "systemd"] (mapAttrsToList microvmConfig vms)
);
}

View file

@ -1,7 +1,6 @@
{
config,
lib,
nodePath,
...
}: let
inherit
@ -37,7 +36,7 @@ in {
config = mkIf config.services.nginx.enable {
age.secrets."dhparams.pem" = {
rekeyFile = nodePath + "/secrets/dhparams.pem.age";
rekeyFile = config.node.secretsDir + "/dhparams.pem.age";
generator = "dhparams";
mode = "440";
group = "nginx";

View file

@ -1,8 +1,6 @@
{
config,
lib,
nodeName,
nodePath,
nodes,
...
}: let
@ -27,7 +25,7 @@ in {
config = mkIf cfg.enable {
age.secrets.promtail-loki-basic-auth-password = {
rekeyFile = nodePath + "/secrets/promtail-loki-basic-auth-password.age";
rekeyFile = config.node.secretsDir + "/promtail-loki-basic-auth-password.age";
generator = "alnum";
mode = "440";
group = "promtail";
@ -48,7 +46,7 @@ in {
clients = [
{
basic_auth.username = "${nodeName}+promtail-loki-basic-auth-password";
basic_auth.username = "${config.repo.node.name}+promtail-loki-basic-auth-password";
basic_auth.password_file = config.age.secrets.promtail-loki-basic-auth-password.path;
url = "https://${nodes.${cfg.proxy}.config.networking.providedDomains.loki}/loki/api/v1/push";
}

View file

@ -1,8 +1,6 @@
{
config,
lib,
nodeName,
nodePath,
nodes,
pkgs,
...
@ -18,6 +16,7 @@
;
cfg = config.meta.telegraf;
nodeName = config.repo.node.name;
in {
options.meta.telegraf = {
enable = mkEnableOption (mdDoc "telegraf to push metrics to influx.");
@ -42,7 +41,7 @@ in {
config = mkIf cfg.enable {
age.secrets.telegraf-influxdb-token = {
rekeyFile = nodePath + "/secrets/telegraf-influxdb-token.age";
rekeyFile = config.node.secretsDir + "/telegraf-influxdb-token.age";
# TODO generator.script = { pkgs, lib, decrypt, deps, ... }: let
# TODO adminBasicAuth = (builtins.head deps).file;
# TODO adminToken = (builtins.head deps).file; # TODO ..... filter by name?

View file

@ -1,54 +1,221 @@
{
config,
inputs,
lib,
extraLib,
nodes,
pkgs,
nodeName,
...
}: let
inherit
(lib)
any
assertMsg
attrNames
attrValues
concatLists
concatMap
concatMapStrings
concatStringsSep
escapeShellArg
filter
filterAttrs
flatten
flip
genAttrs
head
mapAttrs'
mapAttrsToList
mdDoc
mergeAttrs
mkForce
mkIf
mkMerge
mkOption
nameValuePair
optionalAttrs
optionals
partition
removeSuffix
stringLength
types
;
inherit
(extraLib)
(config.lib.misc)
concatAttrs
duplicates
mergeToplevelConfigs
;
inherit
(extraLib.types)
(config.lib.types)
lazyOf
lazyValue
;
inherit (config.lib) net;
cfg = config.meta.wireguard;
nodeName = config.repo.node.name;
libFor = wgName: rec {
# Returns the given node's wireguard configuration of this network
wgCfgOf = node: nodes.${node}.config.meta.wireguard.${wgName};
sortedPeers = peerA: peerB:
if peerA < peerB
then {
peer1 = peerA;
peer2 = peerB;
}
else {
peer1 = peerB;
peer2 = peerA;
};
peerPublicKeyFile = peerName: "/secrets/wireguard/${wgName}/keys/${peerName}.pub";
peerPublicKeyPath = peerName: inputs.self.outPath + peerPublicKeyFile peerName;
peerPrivateKeyFile = peerName: "/secrets/wireguard/${wgName}/keys/${peerName}.age";
peerPrivateKeyPath = peerName: inputs.self.outPath + peerPrivateKeyFile peerName;
peerPrivateKeySecret = peerName: "wireguard-${wgName}-priv-${peerName}";
peerPresharedKeyFile = peerA: peerB: let
inherit (sortedPeers peerA peerB) peer1 peer2;
in "/secrets/wireguard/${wgName}/psks/${peer1}+${peer2}.age";
peerPresharedKeyPath = peerA: peerB: inputs.self.outPath + peerPresharedKeyFile peerA peerB;
peerPresharedKeySecret = peerA: peerB: let
inherit (sortedPeers peerA peerB) peer1 peer2;
in "wireguard-${wgName}-psks-${peer1}+${peer2}";
# All nodes that are part of this network
participatingNodes =
filter
(n: builtins.hasAttr wgName nodes.${n}.config.meta.wireguard)
(attrNames nodes);
# Partition nodes by whether they are servers
_participatingNodes_isServerPartition =
partition
(n: (wgCfgOf n).server.host != null)
participatingNodes;
participatingServerNodes = _participatingNodes_isServerPartition.right;
participatingClientNodes = _participatingNodes_isServerPartition.wrong;
# Maps all nodes that are part of this network to their addresses
nodePeers = genAttrs participatingNodes (n: (wgCfgOf n).addresses);
externalPeerName = p: "external-${p}";
# Only peers that are defined as externalPeers on the given node.
# Prepends "external-" to their name.
externalPeersForNode = node:
mapAttrs' (p: nameValuePair (externalPeerName p)) (wgCfgOf node).server.externalPeers;
# All peers that are defined as externalPeers on any node.
# Prepends "external-" to their name.
allExternalPeers = concatAttrs (map externalPeersForNode participatingNodes);
# All peers that are part of this network
allPeers = nodePeers // allExternalPeers;
# Concatenation of all external peer names without any transformations.
externalPeerNamesRaw = concatMap (n: attrNames (wgCfgOf n).server.externalPeers) participatingNodes;
# A list of all occurring addresses.
usedAddresses =
concatMap (n: (wgCfgOf n).addresses) participatingNodes
++ flatten (concatMap (n: attrValues (wgCfgOf n).server.externalPeers) participatingNodes);
# A list of all occurring addresses, but only includes addresses that
# are not assigned automatically.
explicitlyUsedAddresses =
flip concatMap participatingNodes
(n:
filter (x: !types.isLazyValue x)
(concatLists
(nodes.${n}.options.meta.wireguard.type.functor.wrapped.getSubOptions (wgCfgOf n)).addresses.definitions))
++ flatten (concatMap (n: attrValues (wgCfgOf n).server.externalPeers) participatingNodes);
# The cidrv4 and cidrv6 of the network spanned by all participating peer addresses.
# This also takes into account any reserved address ranges that should be part of the network.
networkAddresses =
net.cidr.merge (usedAddresses
++ concatMap (n: (wgCfgOf n).server.reservedAddresses) participatingServerNodes);
# The network spanning cidr addresses. The respective cidrv4 and cidrv6 are only
# included if they exist.
networkCidrs = filter (x: x != null) (attrValues networkAddresses);
# The cidrv4 and cidrv6 of the network spanned by all reserved addresses only.
# Used to determine automatically assigned addresses first.
spannedReservedNetwork =
net.cidr.merge (concatMap (n: (wgCfgOf n).server.reservedAddresses) participatingServerNodes);
# Assigns an ipv4 address from spannedReservedNetwork.cidrv4
# to each participant that has not explicitly specified an ipv4 address.
assignedIpv4Addresses = assert assertMsg
(spannedReservedNetwork.cidrv4 != null)
"Wireguard network '${wgName}': At least one participating node must reserve a cidrv4 address via `reservedAddresses` so that ipv4 addresses can be assigned automatically from that network.";
net.cidr.assignIps
spannedReservedNetwork.cidrv4
# Don't assign any addresses that are explicitly configured on other hosts
(filter (x: net.cidr.contains x spannedReservedNetwork.cidrv4) (filter net.ip.isv4 explicitlyUsedAddresses))
participatingNodes;
# Assigns an ipv4 address from spannedReservedNetwork.cidrv4
# to each participant that has not explicitly specified an ipv4 address.
assignedIpv6Addresses = assert assertMsg
(spannedReservedNetwork.cidrv6 != null)
"Wireguard network '${wgName}': At least one participating node must reserve a cidrv6 address via `reservedAddresses` so that ipv4 addresses can be assigned automatically from that network.";
net.cidr.assignIps
spannedReservedNetwork.cidrv6
# Don't assign any addresses that are explicitly configured on other hosts
(filter (x: net.cidr.contains x spannedReservedNetwork.cidrv6) (filter net.ip.isv6 explicitlyUsedAddresses))
participatingNodes;
# Appends / replaces the correct cidr length to the argument,
# so that the resulting address is in the cidr.
toNetworkAddr = addr: let
relevantNetworkAddr =
if net.ip.isv6 addr
then networkAddresses.cidrv6
else networkAddresses.cidrv4;
in "${net.cidr.ip addr}/${toString (net.cidr.length relevantNetworkAddr)}";
# Creates a script that when executed outputs a wg-quick compatible configuration
# file for use with external peers. This is a script so we can access secrets without
# storing them in the nix-store.
wgQuickConfigScript = system: serverNode: extPeer: let
pkgs = inputs.self.pkgs.${system};
snCfg = wgCfgOf serverNode;
peerName = externalPeerName extPeer;
addresses = map toNetworkAddr snCfg.server.externalPeers.${extPeer};
in
pkgs.writeShellScript "create-wg-conf-${wgName}-${serverNode}-${extPeer}" ''
privKey=$(${pkgs.rage}/bin/rage -d ${config.lib.secrets.rageDecryptArgs} ${escapeShellArg (peerPrivateKeyPath peerName)}) \
|| { echo "error: Failed to decrypt!" >&2; exit 1; }
serverPsk=$(${pkgs.rage}/bin/rage -d ${config.lib.secrets.rageDecryptArgs} ${escapeShellArg (peerPresharedKeyPath serverNode peerName)}) \
|| { echo "error: Failed to decrypt!" >&2; exit 1; }
cat <<EOF
[Interface]
Address = ${concatStringsSep ", " addresses}
PrivateKey = $privKey
[Peer]
PublicKey = ${removeSuffix "\n" (builtins.readFile (peerPublicKeyPath serverNode))}
PresharedKey = $serverPsk
AllowedIPs = ${concatStringsSep ", " networkCidrs}
Endpoint = ${snCfg.server.host}:${toString snCfg.server.port}
PersistentKeepalive = 25
EOF
'';
};
configForNetwork = wgName: wgCfg: let
inherit
(extraLib.wireguard wgName)
(libFor wgName)
externalPeerName
externalPeerNamesRaw
networkCidrs
@ -365,7 +532,7 @@ in {
ipv4 = mkOption {
type = lazyOf net.types.ipv4;
default = lazyValue (extraLib.wireguard name).assignedIpv4Addresses.${nodeName};
default = lazyValue (libFor name).assignedIpv4Addresses.${nodeName};
description = mdDoc ''
The ipv4 address for this machine. If you do not set this explicitly,
a semi-stable ipv4 address will be derived automatically based on the
@ -377,7 +544,7 @@ in {
ipv6 = mkOption {
type = lazyOf net.types.ipv6;
default = lazyValue (extraLib.wireguard name).assignedIpv6Addresses.${nodeName};
default = lazyValue (libFor name).assignedIpv6Addresses.${nodeName};
description = mdDoc ''
The ipv6 address for this machine. If you do not set this explicitly,
a semi-stable ipv6 address will be derived automatically based on the

View file

@ -1,7 +1,6 @@
# Provides an option to easily rename interfaces by their mac addresses.
{
config,
extraLib,
lib,
pkgs,
...
@ -35,7 +34,7 @@ in {
config = lib.mkIf (cfg != {}) {
assertions = let
duplicateMacs = extraLib.duplicates (attrValues cfg);
duplicateMacs = config.lib.misc.duplicates (attrValues cfg);
in [
{
assertion = duplicateMacs == [];

View file

@ -1,12 +1,6 @@
{
lib,
config,
nixos-hardware,
pkgs,
...
}: {
{inputs, ...}: {
imports = [
nixos-hardware.common-pc-ssd
inputs.nixos-hardware.nixosModules.common-pc-ssd
./physical.nix
];

View file

@ -1,11 +1,10 @@
{
config,
pkgs,
nodePath,
...
}: {
age.secrets.initrd_host_ed25519_key = {
rekeyFile = nodePath + "/secrets/initrd_host_ed25519_key.age";
rekeyFile = config.node.secretsDir + "/initrd_host_ed25519_key.age";
# Generate only an ssh-ed25519 private key
generator.script = {
pkgs,

View file

@ -27,4 +27,19 @@
services.telegraf.extraConfig.inputs = lib.mkIf config.services.telegraf.enable {
zfs.poolMetrics = true;
};
# TODO remove once this is upstreamed
boot.initrd.systemd.services."zfs-import-rpool".after = ["cryptsetup.target"];
# After importing the rpool, rollback the root system to be empty.
boot.initrd.systemd.services.impermanence-root = {
wantedBy = ["initrd.target"];
after = ["zfs-import-rpool.service"];
before = ["sysroot.mount"];
unitConfig.DefaultDependencies = "no";
serviceConfig = {
Type = "oneshot";
ExecStart = "${pkgs.zfs}/bin/zfs rollback -r rpool/local/root@blank";
};
};
}

View file

@ -1,9 +1,7 @@
{
config,
extraLib,
inputs,
lib,
nodeName,
colmenaNodes,
...
}: let
inherit
@ -18,10 +16,7 @@
types
;
inherit
(extraLib)
mergeToplevelConfigs
;
nodeName = config.repo.node.name;
in {
options.nodes = mkOption {
type = types.attrsOf (mkOptionType {
@ -33,12 +28,12 @@ in {
};
config = let
allNodes = attrNames colmenaNodes;
allNodes = attrNames inputs.self.colmenaNodes;
isColmenaNode = elem nodeName allNodes;
foreignConfigs = concatMap (n: colmenaNodes.${n}.config.nodes.${nodeName} or []) allNodes;
foreignConfigs = concatMap (n: inputs.self.colmenaNodes.${n}.config.nodes.${nodeName} or []) allNodes;
toplevelAttrs = ["age" "networking" "systemd" "services"];
in
optionalAttrs isColmenaNode (mergeToplevelConfigs toplevelAttrs (
optionalAttrs isColmenaNode (config.lib.misc.mergeToplevelConfigs toplevelAttrs (
foreignConfigs
# Also allow extending ourselves, in case some attributes from dependent
# configurations such as containers or microvms are merged to the host

View file

@ -1,19 +1,31 @@
{}
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# Declares minimal per-node metadata (`node.name`, `node.secretsDir`) as
# regular module options, so other modules can reference them via `config`
# instead of relying on specialArgs.
# NOTE(review): generate-node.nix sets `repo.node.name` while this module
# declares `options.node.name` — confirm the option paths agree.
{
  config,
  lib,
  ...
}: let
  inherit
    (lib)
    mdDoc
    mkDefault
    mkOption
    types
    ;
in {
  options.node = {
    name = mkOption {
      description = mdDoc "A unique name for this node (host) in the repository. Defines the default hostname, but this can be overwritten.";
      type = types.str;
    };
    secretsDir = mkOption {
      description = mdDoc "Path to the secrets directory for this node.";
      type = types.path;
    };
  };
  config = {
    # The node name doubles as the default hostname; hosts may override it.
    networking.hostName = mkDefault config.node.name;
  };
}

View file

@ -6,18 +6,21 @@
inherit
(nixpkgs.lib)
filterAttrs
flip
mapAttrs
;
nixosNodes = filterAttrs (_: x: x.type == "nixos") self.hosts;
nodes =
mapAttrs
(n: v: import ./generate-node.nix inputs n ({configPath = ../hosts/${n};} // v))
nixosNodes;
nodes = flip mapAttrs nixosNodes (name: hostCfg:
import ./generate-node.nix inputs {
inherit name;
inherit (hostCfg) system;
modules = [../hosts/${name}];
});
in
{
meta = {
description = "oddlama's colmena configuration";
description = "";
# Just a required dummy for colmena, overwritten on a per-node basis by nodeNixpkgs below.
nixpkgs = self.pkgs.x86_64-linux;
nodeNixpkgs = mapAttrs (_: node: node.pkgs) nodes;

View file

@ -7,35 +7,34 @@
home-manager,
impermanence,
microvm,
nixos-hardware,
nixos-nftables-firewall,
nixpkgs,
...
} @ inputs: nodeName: {configPath ? null, ...} @ nodeMeta: let
inherit (nixpkgs.lib) optional pathIsDirectory;
in {
inherit (nodeMeta) system;
pkgs = self.pkgs.${nodeMeta.system};
} @ inputs: {
# The name of the generated node
name,
# Additional modules that should be imported
modules ? [],
# The system in use
system,
...
}: {
inherit system;
pkgs = self.pkgs.${system};
specialArgs = {
inherit (nixpkgs) lib;
inherit (self) extraLib nodes stateVersion colmenaNodes;
inherit inputs nodeName;
# Only set the nodePath if it is an actual directory
nodePath =
if builtins.isPath configPath && pathIsDirectory configPath
then configPath
else null;
nixos-hardware = nixos-hardware.nixosModules;
microvm = microvm.nixosModules;
inherit (self) nodes;
inherit inputs;
};
imports =
[
modules
++ [
{repo.node.name = name;}
agenix.nixosModules.default
agenix-rekey.nixosModules.default
disko.nixosModules.disko
home-manager.nixosModules.default
impermanence.nixosModules.impermanence
nixos-nftables-firewall.nixosModules.default
]
++ optional (configPath != null) configPath;
];
}

View file

@ -1,386 +0,0 @@
{
self,
nixpkgs,
...
}: let
inherit
(nixpkgs.lib)
all
assertMsg
attrNames
attrValues
concatLists
concatMap
concatMapStrings
concatStringsSep
elem
escapeShellArg
filter
flatten
flip
foldAttrs
foldl'
genAttrs
genList
head
isAttrs
mapAttrs'
mergeAttrs
mkMerge
mkOptionType
nameValuePair
optionalAttrs
partition
recursiveUpdate
removeSuffix
showOption
stringToCharacters
substring
unique
;
in rec {
# Custom option types used by this repository's modules.
types = rec {
  # Checks whether the value is a lazy value without causing
  # its value to be evaluated
  isLazyValue = x: isAttrs x && x ? _lazyValue;
  # Constructs a lazy value holding the given value.
  lazyValue = value: {_lazyValue = value;};
  # Represents a lazy value of the given type, which
  # holds the actual value as an attrset like { _lazyValue = <actual value>; }.
  # This allows the option to be defined and filtered from a definition
  # list without evaluating the value.
  lazyValueOf = type:
    mkOptionType rec {
      name = "lazyValueOf ${type.name}";
      inherit (type) description descriptionClass emptyValue getSubOptions getSubModules;
      check = isLazyValue;
      # Merging only type-checks the wrapped `_lazyValue`s; the values
      # themselves stay unevaluated until the merged result is forced.
      merge = loc: defs:
        assert assertMsg
        (all (x: type.check x._lazyValue) defs)
        "The option `${showOption loc}` is defined with a lazy value holding an invalid type";
          nixpkgs.lib.types.mergeOneOption loc defs;
      substSubModules = m: nixpkgs.lib.types.uniq (type.substSubModules m);
      functor = (nixpkgs.lib.types.defaultFunctor name) // {wrapped = type;};
      nestedTypes.elemType = type;
    };
  # Represents a value or lazy value of the given type that will
  # automatically be coerced to the given type when merged.
  lazyOf = type: nixpkgs.lib.types.coercedTo (lazyValueOf type) (x: x._lazyValue) type;
};
# Returns an attrset mapping each element of xs to the number
# of times it occurs in xs.
countOccurrences = foldl' (counts: elem: counts // {${elem} = (counts.${elem} or 0) + 1;}) {};
# Returns the unique elements of xs that occur at least twice.
duplicates = xs: let
  counts = countOccurrences xs;
in
  unique (filter (elem: counts.${elem} > 1) xs);
# Folds a list of attrsets left-to-right with `//`, so later sets win.
concatAttrs = attrsets: foldl' mergeAttrs {} attrsets;
# True if the path or string starts with /
isAbsolutePath = path: "/" == substring 0 1 path;
# Merges the given top-level attributes of all given attrsets using
# mkMerge. Useful to merge several top-level configs in a module.
mergeToplevelConfigs = keys: attrs:
  genAttrs keys (key: mkMerge (map (attrset: attrset.${key} or {}) attrs));
# Calculates base^exp by repeated multiplication.
# Careful, this overflows for results > 2^62.
pow = base: exp: foldl' (acc: factor: acc * factor) 1 (genList (_: base) exp);
# Converts the given hex string to an integer. Only reliable for inputs in [0, 2^63),
# after that the sign bit will overflow.
hexToDec = str: let
  # Value of each hex digit; both lower- and uppercase are accepted.
  digitValues = {
    "0" = 0;
    "1" = 1;
    "2" = 2;
    "3" = 3;
    "4" = 4;
    "5" = 5;
    "6" = 6;
    "7" = 7;
    "8" = 8;
    "9" = 9;
    "a" = 10;
    "b" = 11;
    "c" = 12;
    "d" = 13;
    "e" = 14;
    "f" = 15;
    "A" = 10;
    "B" = 11;
    "C" = 12;
    "D" = 13;
    "E" = 14;
    "F" = 15;
  };
  # Horner's scheme: shift the accumulator by one hex digit and add.
  addDigit = acc: c: 16 * acc + digitValues.${c};
in
  foldl' addDigit 0 (stringToCharacters str);
# Helpers to build disko disk layouts without repeating boilerplate.
disko = {
  gpt = {
    # A BIOS boot partition so GRUB can embed itself on GPT disks.
    partGrub = name: start: end: {
      inherit name start end;
      part-type = "primary";
      flags = ["bios_grub"];
    };
    # An EFI system partition, formatted as vfat and mounted at /boot.
    partEfi = name: start: end: {
      inherit name start end;
      fs-type = "fat32";
      bootable = true;
      content = {
        type = "filesystem";
        format = "vfat";
        mountpoint = "/boot";
      };
    };
    # A swap partition that is re-encrypted with a random key on each boot.
    partSwap = name: start: end: {
      inherit name start end;
      fs-type = "linux-swap";
      content = {
        type = "swap";
        randomEncryption = true;
      };
    };
    # A LUKS container (named "enc-<name>") holding a member of the
    # ZFS pool of the same base name. Discards are passed through.
    partLuksZfs = name: start: end: {
      inherit start end;
      name = "enc-${name}";
      content = {
        type = "luks";
        name = "enc-${name}";
        extraOpenArgs = ["--allow-discards"];
        content = {
          type = "zfs";
          pool = name;
        };
      };
    };
  };
  zfs = rec {
    # Sensible default zpool settings; datasets are mounted manually
    # (mountpoint = "none", canmount = "off" at the pool root).
    defaultZpoolOptions = {
      type = "zpool";
      mountRoot = "/mnt";
      rootFsOptions = {
        compression = "zstd";
        acltype = "posix";
        atime = "off";
        xattr = "sa";
        dnodesize = "auto";
        mountpoint = "none";
        canmount = "off";
        devices = "off";
      };
      options.ashift = "12";
    };
    # Default dataset layout for an impermanence setup: an ephemeral
    # root (snapshotted empty as @blank right after creation, so it can
    # be rolled back on boot) plus persistent nix/state/persist datasets.
    defaultZfsDatasets = {
      "local" = unmountable;
      "local/root" =
        filesystem "/"
        // {
          postCreateHook = "zfs snapshot rpool/local/root@blank";
        };
      "local/nix" = filesystem "/nix";
      "local/state" = filesystem "/state";
      "safe" = unmountable;
      "safe/persist" = filesystem "/persist";
    };
    # A dataset that only serves as a parent and is never mounted itself.
    unmountable = {type = "zfs_fs";};
    # A dataset mounted at the given mountpoint (canmount=noauto so
    # mounting is driven by the generated fstab/initrd units).
    filesystem = mountpoint: {
      type = "zfs_fs";
      options = {
        canmount = "noauto";
        inherit mountpoint;
      };
      # Required to add dependencies for initrd
      inherit mountpoint;
    };
  };
};
# `-i <identity>` arguments for each master identity (used for decryption
# and as implicit recipients when encrypting).
rageMasterIdentityArgs = concatMapStrings (x: ''-i ${escapeShellArg x} '') self.secretsConfig.masterIdentities;
# Additional recipient arguments for encryption: absolute paths are passed
# as recipient files (-R), anything else as a literal recipient key (-r).
rageExtraEncryptionPubkeys =
  concatMapStrings (
    x:
      if isAbsolutePath x
      then ''-R ${escapeShellArg x} ''
      else ''-r ${escapeShellArg x} ''
  )
  self.secretsConfig.extraEncryptionPubkeys;
# The arguments required to de-/encrypt a secret in this repository
rageDecryptArgs = "${rageMasterIdentityArgs}";
rageEncryptArgs = "${rageMasterIdentityArgs} ${rageExtraEncryptionPubkeys}";
# TODO merge this into a _meta readonly option in the wireguard module
# Wireguard related functions that are reused in several files of this flake.
# `wgName` selects one wireguard network; all helpers below are scoped to it.
wireguard = wgName: rec {
  # Get access to the networking lib by referring to one of the participating nodes.
  # Not ideal, but ok.
  inherit (self.nodes.${head participatingNodes}.config.lib) net;
  # Returns the given node's wireguard configuration of this network
  wgCfgOf = node: self.nodes.${node}.config.meta.wireguard.${wgName};
  # Orders the two peers deterministically, so (a,b) and (b,a) refer to
  # the same shared psk file/secret below.
  sortedPeers = peerA: peerB:
    if peerA < peerB
    then {
      peer1 = peerA;
      peer2 = peerB;
    }
    else {
      peer1 = peerB;
      peer2 = peerA;
    };
  # Repository-relative and absolute locations of per-peer key material.
  peerPublicKeyFile = peerName: "secrets/wireguard/${wgName}/keys/${peerName}.pub";
  peerPublicKeyPath = peerName: "${self.outPath}/" + peerPublicKeyFile peerName;
  peerPrivateKeyFile = peerName: "secrets/wireguard/${wgName}/keys/${peerName}.age";
  peerPrivateKeyPath = peerName: "${self.outPath}/" + peerPrivateKeyFile peerName;
  peerPrivateKeySecret = peerName: "wireguard-${wgName}-priv-${peerName}";
  peerPresharedKeyFile = peerA: peerB: let
    inherit (sortedPeers peerA peerB) peer1 peer2;
  in "secrets/wireguard/${wgName}/psks/${peer1}+${peer2}.age";
  peerPresharedKeyPath = peerA: peerB: "${self.outPath}/" + peerPresharedKeyFile peerA peerB;
  peerPresharedKeySecret = peerA: peerB: let
    inherit (sortedPeers peerA peerB) peer1 peer2;
  in "wireguard-${wgName}-psks-${peer1}+${peer2}";
  # All nodes that are part of this network
  participatingNodes =
    filter
    (n: builtins.hasAttr wgName self.nodes.${n}.config.meta.wireguard)
    (attrNames self.nodes);
  # Partition nodes by whether they are servers (i.e. have a public host)
  _participatingNodes_isServerPartition =
    partition
    (n: (wgCfgOf n).server.host != null)
    participatingNodes;
  participatingServerNodes = _participatingNodes_isServerPartition.right;
  participatingClientNodes = _participatingNodes_isServerPartition.wrong;
  # Maps all nodes that are part of this network to their addresses
  nodePeers = genAttrs participatingNodes (n: (wgCfgOf n).addresses);
  externalPeerName = p: "external-${p}";
  # Only peers that are defined as externalPeers on the given node.
  # Prepends "external-" to their name.
  externalPeersForNode = node:
    mapAttrs' (p: nameValuePair (externalPeerName p)) (wgCfgOf node).server.externalPeers;
  # All peers that are defined as externalPeers on any node.
  # Prepends "external-" to their name.
  allExternalPeers = concatAttrs (map externalPeersForNode participatingNodes);
  # All peers that are part of this network
  allPeers = nodePeers // allExternalPeers;
  # Concatenation of all external peer names without any transformations.
  externalPeerNamesRaw = concatMap (n: attrNames (wgCfgOf n).server.externalPeers) participatingNodes;
  # A list of all occurring addresses.
  usedAddresses =
    concatMap (n: (wgCfgOf n).addresses) participatingNodes
    ++ flatten (concatMap (n: attrValues (wgCfgOf n).server.externalPeers) participatingNodes);
  # A list of all occurring addresses, but only includes addresses that
  # are not assigned automatically (inspects raw option definitions and
  # drops lazy values, which mark auto-assigned addresses).
  explicitlyUsedAddresses =
    flip concatMap participatingNodes
    (n:
      filter (x: !types.isLazyValue x)
      (concatLists
        (self.nodes.${n}.options.meta.wireguard.type.functor.wrapped.getSubOptions (wgCfgOf n)).addresses.definitions))
    ++ flatten (concatMap (n: attrValues (wgCfgOf n).server.externalPeers) participatingNodes);
  # The cidrv4 and cidrv6 of the network spanned by all participating peer addresses.
  # This also takes into account any reserved address ranges that should be part of the network.
  networkAddresses =
    net.cidr.merge (usedAddresses
      ++ concatMap (n: (wgCfgOf n).server.reservedAddresses) participatingServerNodes);
  # The network spanning cidr addresses. The respective cidrv4 and cidrv6 are only
  # included if they exist.
  networkCidrs = filter (x: x != null) (attrValues networkAddresses);
  # The cidrv4 and cidrv6 of the network spanned by all reserved addresses only.
  # Used to determine automatically assigned addresses first.
  spannedReservedNetwork =
    net.cidr.merge (concatMap (n: (wgCfgOf n).server.reservedAddresses) participatingServerNodes);
  # Assigns an ipv4 address from spannedReservedNetwork.cidrv4
  # to each participant that has not explicitly specified an ipv4 address.
  assignedIpv4Addresses = assert assertMsg
  (spannedReservedNetwork.cidrv4 != null)
  "Wireguard network '${wgName}': At least one participating node must reserve a cidrv4 address via `reservedAddresses` so that ipv4 addresses can be assigned automatically from that network.";
    net.cidr.assignIps
    spannedReservedNetwork.cidrv4
    # Don't assign any addresses that are explicitly configured on other hosts
    (filter (x: net.cidr.contains x spannedReservedNetwork.cidrv4) (filter net.ip.isv4 explicitlyUsedAddresses))
    participatingNodes;
  # Assigns an ipv6 address from spannedReservedNetwork.cidrv6
  # to each participant that has not explicitly specified an ipv6 address.
  assignedIpv6Addresses = assert assertMsg
  (spannedReservedNetwork.cidrv6 != null)
  "Wireguard network '${wgName}': At least one participating node must reserve a cidrv6 address via `reservedAddresses` so that ipv6 addresses can be assigned automatically from that network.";
    net.cidr.assignIps
    spannedReservedNetwork.cidrv6
    # Don't assign any addresses that are explicitly configured on other hosts
    (filter (x: net.cidr.contains x spannedReservedNetwork.cidrv6) (filter net.ip.isv6 explicitlyUsedAddresses))
    participatingNodes;
  # Appends / replaces the correct cidr length to the argument,
  # so that the resulting address is in the cidr.
  toNetworkAddr = addr: let
    relevantNetworkAddr =
      if net.ip.isv6 addr
      then networkAddresses.cidrv6
      else networkAddresses.cidrv4;
  in "${net.cidr.ip addr}/${toString (net.cidr.length relevantNetworkAddr)}";
  # Creates a script that when executed outputs a wg-quick compatible configuration
  # file for use with external peers. This is a script so we can access secrets without
  # storing them in the nix-store.
  wgQuickConfigScript = system: serverNode: extPeer: let
    pkgs = self.pkgs.${system};
    snCfg = wgCfgOf serverNode;
    peerName = externalPeerName extPeer;
    addresses = map toNetworkAddr snCfg.server.externalPeers.${extPeer};
  in
    pkgs.writeShellScript "create-wg-conf-${wgName}-${serverNode}-${extPeer}" ''
      privKey=$(${pkgs.rage}/bin/rage -d ${rageDecryptArgs} ${escapeShellArg (peerPrivateKeyPath peerName)}) \
        || { echo "error: Failed to decrypt!" >&2; exit 1; }
      serverPsk=$(${pkgs.rage}/bin/rage -d ${rageDecryptArgs} ${escapeShellArg (peerPresharedKeyPath serverNode peerName)}) \
        || { echo "error: Failed to decrypt!" >&2; exit 1; }
      cat <<EOF
      [Interface]
      Address = ${concatStringsSep ", " addresses}
      PrivateKey = $privKey
      [Peer]
      PublicKey = ${removeSuffix "\n" (builtins.readFile (peerPublicKeyPath serverNode))}
      PresharedKey = $serverPsk
      AllowedIPs = ${concatStringsSep ", " networkCidrs}
      Endpoint = ${snCfg.server.host}:${toString snCfg.server.port}
      PersistentKeepalive = 25
      EOF
    '';
};
}