refactor: major refactor into proper reusable modules. No logical changes.

oddlama 2023-06-29 00:27:54 +02:00
parent 04872f6ec5
commit 84ac34cb6c
No known key found for this signature in database
GPG key ID: 14EFE510775FE39A
80 changed files with 761 additions and 776 deletions

modules/config/boot.nix Normal file

@ -0,0 +1,23 @@
{
config,
lib,
pkgs,
...
}: {
boot = {
initrd.systemd = {
enable = true;
emergencyAccess = config.repo.secrets.global.root.hashedPassword;
# TODO good idea? targets.emergency.wants = ["network.target" "sshd.service"];
extraBin.ip = "${pkgs.iproute2}/bin/ip";
};
# NOTE: Add "rd.systemd.unit=rescue.target" to debug initrd
kernelParams = ["log_buf_len=10M"];
tmp.useTmpfs = true;
loader.timeout = lib.mkDefault 2;
};
console.earlySetup = true;
}


@ -0,0 +1,12 @@
{
home-manager = {
useGlobalPkgs = true;
useUserPackages = true;
verbose = true;
};
# Required even when using home-manager's zsh module since the /etc/profile load order
# is partly controlled by this. See nix-community/home-manager#3681.
# TODO remove once we have nushell
programs.zsh.enable = true;
}


@ -0,0 +1,160 @@
{
config,
lib,
...
}: {
# Give agenix access to the hostkey independent of impermanence activation
age.identityPaths = ["/persist/etc/ssh/ssh_host_ed25519_key"];
# State that should be kept across reboots, but is otherwise
# NOT important information that would need to be backed up.
environment.persistence."/state" = {
hideMounts = true;
directories =
[
{
directory = "/var/lib/systemd";
user = "root";
group = "root";
mode = "0755";
}
{
directory = "/var/log";
user = "root";
group = "root";
mode = "0755";
}
#{ directory = "/tmp"; user = "root"; group = "root"; mode = "1777"; }
#{ directory = "/var/tmp"; user = "root"; group = "root"; mode = "1777"; }
{
directory = "/var/spool";
user = "root";
group = "root";
mode = "0755";
}
]
++ lib.optionals config.networking.wireless.iwd.enable [
{
directory = "/var/lib/iwd";
user = "root";
group = "root";
mode = "0700";
}
];
};
# State that should be kept forever, and backed up accordingly.
environment.persistence."/persist" = {
hideMounts = true;
files = [
"/etc/machine-id"
"/etc/ssh/ssh_host_ed25519_key"
"/etc/ssh/ssh_host_ed25519_key.pub"
];
directories =
[
{
directory = "/var/lib/nixos";
user = "root";
group = "root";
mode = "0755";
}
]
++ lib.optionals config.security.acme.acceptTerms [
{
directory = "/var/lib/acme";
user = "acme";
group = "acme";
mode = "0755";
}
]
++ lib.optionals config.services.printing.enable [
{
directory = "/var/lib/cups";
user = "root";
group = "root";
mode = "0700";
}
]
++ lib.optionals config.services.fail2ban.enable [
{
directory = "/var/lib/fail2ban";
user = "fail2ban";
group = "fail2ban";
mode = "0750";
}
]
++ lib.optionals config.services.postgresql.enable [
{
directory = "/var/lib/postgresql";
user = "postgres";
group = "postgres";
mode = "0700";
}
]
++ lib.optionals config.services.gitea.enable [
{
directory = config.services.gitea.stateDir;
user = "gitea";
group = "gitea";
mode = "0700";
}
]
++ lib.optionals config.services.caddy.enable [
{
directory = config.services.caddy.dataDir;
user = "caddy";
group = "caddy";
mode = "0700";
}
]
++ lib.optionals config.services.loki.enable [
{
directory = "/var/lib/loki";
user = "loki";
group = "loki";
mode = "0700";
}
]
++ lib.optionals config.services.grafana.enable [
{
directory = config.services.grafana.dataDir;
user = "grafana";
group = "grafana";
mode = "0700";
}
]
++ lib.optionals config.services.kanidm.enableServer [
{
directory = "/var/lib/kanidm";
user = "kanidm";
group = "kanidm";
mode = "0700";
}
]
++ lib.optionals config.services.vaultwarden.enable [
{
directory = "/var/lib/vaultwarden";
user = "vaultwarden";
group = "vaultwarden";
mode = "0700";
}
]
++ lib.optionals config.services.influxdb2.enable [
{
directory = "/var/lib/influxdb2";
user = "influxdb2";
group = "influxdb2";
mode = "0700";
}
]
++ lib.optionals config.services.telegraf.enable [
{
directory = "/var/lib/telegraf";
user = "telegraf";
group = "telegraf";
mode = "0700";
}
];
};
}

modules/config/inputrc.nix Normal file

@ -0,0 +1,110 @@
{
environment.etc."inputrc".text = ''
# /etc/inputrc: initialization file for readline
#
# For more information on how this file works, please see the
# INITIALIZATION FILE section of the readline(3) man page
#
# Quick dirty little note:
# To get the key sequence for binding, you can abuse bash.
# While running bash, hit CTRL+V, and then type the key sequence.
# So, typing 'ALT + left arrow' in Konsole gets you back:
# ^[[1;3D
# The readline entry to make this skip back a word will then be:
# "\e[1;3D" backward-word
#
# Customization note:
# You don't need to put all your changes in this file. You can create
# ~/.inputrc which starts off with the line:
# $include /etc/inputrc
# Then put all your own stuff after that.
#
# do not bell on tab-completion
set bell-style none
set history-size -1
set meta-flag on
set input-meta on
set convert-meta off
set output-meta on
# don't output everything on the first line
set horizontal-scroll-mode off
# append slash to completed directories & symlinked directories
set mark-directories on
set mark-symlinked-directories on
# don't expand ~ in tab completion
set expand-tilde off
# instead of ringing the bell, show the list of ambiguous completions directly; also show up to 300 items before asking
set show-all-if-ambiguous on
set completion-query-items 300
$if mode=emacs
# for linux console and RH/Debian xterm
# allow the use of the Home/End keys
"\e[1~": beginning-of-line
"\e[4~": end-of-line
# map "page up" and "page down" to search history based on current cmdline
"\e[5~": history-search-backward
"\e[6~": history-search-forward
# allow the use of the Delete/Insert keys
"\e[3~": delete-char
"\e[2~": quoted-insert
# gnome / others (escape + arrow key)
"\e[5C": forward-word
"\e[5D": backward-word
# konsole / xterm / rxvt (escape + arrow key)
"\e\e[C": forward-word
"\e\e[D": backward-word
# gnome / konsole / others (control + arrow key)
"\e[1;5C": forward-word
"\e[1;5D": backward-word
# aterm / eterm (control + arrow key)
"\eOc": forward-word
"\eOd": backward-word
# konsole (alt + arrow key)
"\e[1;3C": forward-word
"\e[1;3D": backward-word
# Chromebooks remap alt + backspace so provide alternative (alt + k)
"\ek": backward-kill-word
$if term=rxvt
"\e[8~": end-of-line
"\e[3^": kill-line
"\e[3@": backward-kill-line
$endif
# for non RH/Debian xterm, can't hurt for RH/Debian xterm
"\eOH": beginning-of-line
"\eOF": end-of-line
# for freebsd console
"\e[H": beginning-of-line
"\e[F": end-of-line
# fix Home and End for German users
"\e[7~": beginning-of-line
"\e[8~": end-of-line
# ctrl [+ shift] + del = kill line [backward]
"\e[3;5~": kill-line
"\e[3;6~": backward-kill-line
$endif
# Up and Down should search history based on current cmdline
"\e[A": history-search-backward
"\e[B": history-search-forward
'';
}

modules/config/issue.nix Normal file

@ -0,0 +1,12 @@
let
# IP addresses: ${"${interface} \e{halfbright}\4{${interface}}\e{reset} \e{halfbright}\6{${interface}}\e{reset}"}
issue_text = ''
\d \t
\e{halfbright}\4\e{reset} \e{halfbright}\6\e{reset}
This is \e{cyan}\n\e{reset} [\e{lightblue}\l\e{reset}] (\s \m \r)
'';
in {
environment.etc."issue".text = issue_text;
environment.etc."issue.logo".text = issue_text;
}

modules/config/lib.nix Normal file

@ -0,0 +1,334 @@
{
extraLib,
inputs,
lib,
...
}: {
# IP address math library
# https://gist.github.com/duairc/5c9bb3c922e5d501a1edb9e7b3b845ba
# Plus some extensions by us
lib = let
libWithNet = (import "${inputs.lib-net}/net.nix" {inherit lib;}).lib;
in
lib.recursiveUpdate libWithNet {
net = {
cidr = rec {
# host :: (ip | mac | integer) -> cidr -> ip
#
# Wrapper that extends the original host function to
# check whether the host offset `i` lies within the range of the given cidr `n`.
#
# Examples:
#
# > net.cidr.host 255 "192.168.1.0/24"
# "192.168.1.255"
# > net.cidr.host (256) "192.168.1.0/24"
# <fails with an error message>
# > net.cidr.host (-1) "192.168.1.0/24"
# "192.168.1.255"
# > net.cidr.host (-256) "192.168.1.0/24"
# "192.168.1.0"
# > net.cidr.host (-257) "192.168.1.0/24"
# <fails with an error message>
host = i: n: let
cap = libWithNet.net.cidr.capacity n;
in
assert lib.assertMsg (i >= (-cap) && i < cap) "The host ${toString i} lies outside of ${n}";
libWithNet.net.cidr.host i n;
# hostCidr :: (ip | mac | integer) -> cidr -> cidr
#
# Returns the nth host in the given cidr range (like cidr.host)
# but as a cidr that retains the original prefix length.
#
# Examples:
#
# > net.cidr.hostCidr 2 "192.168.1.0/24"
# "192.168.1.2/24"
hostCidr = n: x: "${libWithNet.net.cidr.host n x}/${toString (libWithNet.net.cidr.length x)}";
# ip :: (cidr | ip) -> ip
#
# Returns just the ip part of the cidr.
#
# Examples:
#
# > net.cidr.ip "192.168.1.100/24"
# "192.168.1.100"
# > net.cidr.ip "192.168.1.100"
# "192.168.1.100"
ip = x: lib.head (lib.splitString "/" x);
# canonicalize :: cidr -> cidr
#
# Replaces the ip of the cidr with the canonical network address
# (first contained address in range)
#
# Examples:
#
# > net.cidr.canonicalize "192.168.1.100/24"
# "192.168.1.0/24"
canonicalize = x: libWithNet.net.cidr.make (libWithNet.net.cidr.length x) (ip x);
# mergev4 :: [cidrv4 | ipv4] -> (cidrv4 | null)
#
# Returns the smallest cidr network that includes all given networks.
# If no cidr mask is given, /32 is assumed.
#
# Examples:
#
# > net.cidr.mergev4 ["192.168.1.1/24" "192.168.6.1/32"]
# "192.168.0.0/21"
mergev4 = addrs_: let
# Append /32 if necessary
addrs = map (x:
if lib.hasInfix "/" x
then x
else "${x}/32")
addrs_;
# The smallest occurring length is the first we need to start checking, since
# any greater cidr length represents a smaller address range which
# wouldn't contain all of the original addresses.
startLength = lib.foldl' lib.min 32 (map libWithNet.net.cidr.length addrs);
possibleLengths = lib.reverseList (lib.range 0 startLength);
# The first ip address will be "expanded" in cidr length until it covers all other
# used addresses.
firstIp = ip (lib.head addrs);
# Return the first cidr length in the list (i.e. the greatest prefix length,
# and thus the smallest network) that covers all used addresses
bestLength = lib.head (lib.filter
# All given addresses must be contained in the generated network.
(len:
lib.all
(x:
libWithNet.net.cidr.contains
(ip x)
(libWithNet.net.cidr.make len firstIp))
addrs)
possibleLengths);
in
assert lib.assertMsg (!lib.any (lib.hasInfix ":") addrs) "mergev4 cannot operate on ipv6 addresses";
if addrs == []
then null
else libWithNet.net.cidr.make bestLength firstIp;
# mergev6 :: [cidrv6 | ipv6] -> (cidrv6 | null)
#
# Returns the smallest cidr network that includes all given networks.
# If no cidr mask is given, /128 is assumed.
#
# Examples:
#
# > net.cidr.mergev6 ["fd00:dead:cafe::/64" "fd00:fd12:3456:7890::/56"]
# "fd00:c000::/18"
mergev6 = addrs_: let
# Append /128 if necessary
addrs = map (x:
if lib.hasInfix "/" x
then x
else "${x}/128")
addrs_;
# The smallest occurring length is the first we need to start checking, since
# any greater cidr length represents a smaller address range which
# wouldn't contain all of the original addresses.
startLength = lib.foldl' lib.min 128 (map libWithNet.net.cidr.length addrs);
possibleLengths = lib.reverseList (lib.range 0 startLength);
# The first ip address will be "expanded" in cidr length until it covers all other
# used addresses.
firstIp = ip (lib.head addrs);
# Return the first cidr length in the list (i.e. the greatest prefix length,
# and thus the smallest network) that covers all used addresses
bestLength = lib.head (lib.filter
# All given addresses must be contained in the generated network.
(len:
lib.all
(x:
libWithNet.net.cidr.contains
(ip x)
(libWithNet.net.cidr.make len firstIp))
addrs)
possibleLengths);
in
assert lib.assertMsg (lib.all (lib.hasInfix ":") addrs) "mergev6 cannot operate on ipv4 addresses";
if addrs == []
then null
else libWithNet.net.cidr.make bestLength firstIp;
# merge :: [cidr] -> { cidrv4 = (cidrv4 | null); cidrv6 = (cidrv6 | null); }
#
# Returns the smallest cidr network that includes all given networks,
# but yields two separate results: one for the given ipv4 addresses and one for the ipv6 addresses.
# Equivalent to calling mergev4 and mergev6 on the respective partitions.
merge = addrs: let
v4_and_v6 = lib.partition (lib.hasInfix ":") addrs;
in {
cidrv4 = mergev4 v4_and_v6.wrong;
cidrv6 = mergev6 v4_and_v6.right;
};
# assignIps :: cidr -> [int | ip] -> [string] -> [ip]
#
# Assigns a semi-stable ip address from the given cidr network to each hostname.
# The algorithm is based on hashing (abusing sha256) with linear probing.
# The order of hosts doesn't matter. No ip (or offset) from the reserved list
# will be assigned. The network address and broadcast address will always be reserved
# automatically.
#
# Examples:
#
# > net.cidr.assignIps "192.168.100.1/24" [] ["a" "b" "c"]
# { a = "192.168.100.202"; b = "192.168.100.74"; c = "192.168.100.226"; }
#
# > net.cidr.assignIps "192.168.100.1/24" [] ["a" "b" "c" "a-new-elem"]
# { a = "192.168.100.202"; a-new-elem = "192.168.100.88"; b = "192.168.100.74"; c = "192.168.100.226"; }
#
# > net.cidr.assignIps "192.168.100.1/24" [202 "192.168.100.74"] ["a" "b" "c"]
# { a = "192.168.100.203"; b = "192.168.100.75"; c = "192.168.100.226"; }
assignIps = net: reserved: hosts: let
cidrSize = libWithNet.net.cidr.size net;
capacity = libWithNet.net.cidr.capacity net;
# The base address of the network. Used to convert ip-based reservations to offsets
baseAddr = host 0 net;
# Reserve some values for the network, host and broadcast address.
# The network and broadcast address should never be used, and we
# want to reserve the host address for the host. We also convert
# any ips to offsets here.
init = lib.unique (
[0 (capacity - 1)]
++ lib.flip map reserved (x:
if builtins.typeOf x == "int"
then x
else -(libWithNet.net.ip.diff baseAddr x))
);
nHosts = builtins.length hosts;
nInit = builtins.length init;
# Pre-sort all hosts, to ensure ordering invariance
sortedHosts =
lib.warnIf
((nInit + nHosts) > 0.3 * capacity)
"assignIps: hash stability may be degraded since utilization is >30%"
(builtins.sort builtins.lessThan hosts);
# Generates a hash (i.e. offset value) for a given hostname
hashElem = x:
builtins.bitAnd (capacity - 1)
(extraLib.hexToDec (builtins.substring 0 16 (builtins.hashString "sha256" x)));
# Do linear probing. Returns the first unused value at or after the given value.
probe = avoid: value:
if lib.elem value avoid
# TODO lib.mod
# Poor man's modulo, because nix has no modulo. Luckily we operate on a residue
# class of x modulo 2^n, so we can use bitAnd instead.
then probe avoid (builtins.bitAnd (capacity - 1) (value + 1))
else value;
# Hash a new element and avoid assigning any existing values.
assignOne = {
assigned,
used,
}: x: let
value = probe used (hashElem x);
in {
assigned =
assigned
// {
${x} = host value net;
};
used = [value] ++ used;
};
in
assert lib.assertMsg (cidrSize >= 2 && cidrSize <= 62)
"assignIps: cidrSize=${toString cidrSize} is not in [2, 62].";
assert lib.assertMsg (nHosts <= capacity - nInit)
"assignIps: number of hosts (${toString nHosts}) must be <= capacity (${toString capacity}) - reserved (${toString nInit})";
# Assign an ip in the subnet to each element, in order
(lib.foldl' assignOne {
assigned = {};
used = init;
}
sortedHosts)
.assigned;
};
ip = rec {
# Checks whether the given address (with or without cidr notation) is an ipv4 address.
isv4 = x: !isv6 x;
# Checks whether the given address (with or without cidr notation) is an ipv6 address.
isv6 = lib.hasInfix ":";
};
mac = {
# Adds offset to the given base address and ensures the result is in
# a locally administered range by replacing the second nibble with a 2.
addPrivate = base: offset: let
added = libWithNet.net.mac.add base offset;
pre = lib.substring 0 1 added;
suf = lib.substring 2 (-1) added;
in "${pre}2${suf}";
# assignMacs :: mac (base) -> int (size) -> [int | mac] (reserved) -> [string] (hosts) -> [mac]
#
# Assigns a semi-stable MAC address starting in [base, base + 2^size) to each hostname.
# The algorithm is based on hashing (abusing sha256) with linear probing.
# The order of hosts doesn't matter. No mac (or offset) from the reserved list
# will be assigned.
#
# Examples:
#
# > net.mac.assignMacs "11:22:33:00:00:00" 24 [] ["a" "b" "c"]
# { a = "11:22:33:1b:bd:ca"; b = "11:22:33:39:59:4a"; c = "11:22:33:50:7a:e2"; }
#
# > net.mac.assignMacs "11:22:33:00:00:00" 24 [] ["a" "b" "c" "a-new-elem"]
# { a = "11:22:33:1b:bd:ca"; a-new-elem = "11:22:33:d6:5d:58"; b = "11:22:33:39:59:4a"; c = "11:22:33:50:7a:e2"; }
#
# > net.mac.assignMacs "11:22:33:00:00:00" 24 ["11:22:33:1b:bd:ca"] ["a" "b" "c"]
# { a = "11:22:33:1b:bd:cb"; b = "11:22:33:39:59:4a"; c = "11:22:33:50:7a:e2"; }
assignMacs = base: size: reserved: hosts: let
capacity = extraLib.pow 2 size;
baseAsInt = libWithNet.net.mac.diff base "00:00:00:00:00:00";
init = lib.unique (
lib.flip map reserved (x:
if builtins.typeOf x == "int"
then x
else libWithNet.net.mac.diff x base)
);
nHosts = builtins.length hosts;
nInit = builtins.length init;
# Pre-sort all hosts, to ensure ordering invariance
sortedHosts =
lib.warnIf
((nInit + nHosts) > 0.3 * capacity)
"assignMacs: hash stability may be degraded since utilization is >30%"
(builtins.sort builtins.lessThan hosts);
# Generates a hash (i.e. offset value) for a given hostname
hashElem = x:
builtins.bitAnd (capacity - 1)
(extraLib.hexToDec (builtins.substring 0 16 (builtins.hashString "sha256" x)));
# Do linear probing. Returns the first unused value at or after the given value.
probe = avoid: value:
if lib.elem value avoid
# TODO lib.mod
# Poor man's modulo, because nix has no modulo. Luckily we operate on a residue
# class of x modulo 2^n, so we can use bitAnd instead.
then probe avoid (builtins.bitAnd (capacity - 1) (value + 1))
else value;
# Hash a new element and avoid assigning any existing values.
assignOne = {
assigned,
used,
}: x: let
value = probe used (hashElem x);
in {
assigned =
assigned
// {
${x} = libWithNet.net.mac.add value base;
};
used = [value] ++ used;
};
in
assert lib.assertMsg (size >= 2 && size <= 62)
"assignMacs: size=${toString size} is not in [2, 62].";
assert lib.assertMsg (builtins.bitAnd (capacity - 1) baseAsInt == 0)
"assignMacs: the size=${toString size} least significant bits of the base mac address must be 0.";
assert lib.assertMsg (nHosts <= capacity - nInit)
"assignMacs: number of hosts (${toString nHosts}) must be <= capacity (${toString capacity}) - reserved (${toString nInit})";
# Assign a mac address in the range to each element, in order
(lib.foldl' assignOne {
assigned = {};
used = init;
}
sortedHosts)
.assigned;
};
};
};
}
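
As a minimal sketch of how another module could consume these helpers (the subnet, interface name, and host names below are made up for illustration):

{config, ...}: let
  inherit (config.lib) net;
  subnet = "10.0.0.0/24";
  # Deterministic peer addresses; offset 1 stays reserved for this host.
  peerIps = net.cidr.assignIps subnet [1] ["alpha" "beta" "gamma"];
in {
  systemd.network.networks."10-my-net" = {
    matchConfig.Name = "my-net";
    # This host takes the first usable address, with the subnet's prefix length.
    address = [(net.cidr.hostCidr 1 subnet)]; # -> "10.0.0.1/24"
    # peerIps.alpha etc. can then be referenced wherever the peers are configured.
  };
}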


@ -0,0 +1,8 @@
{
# If the host defines microvms, ensure that our modules and
# some boilerplate are imported automatically.
meta.microvms.commonImports = [
../.
{home-manager.users.root.home.minimal = true;}
];
}

modules/config/net.nix Normal file

@ -0,0 +1,20 @@
{
config,
lib,
nodeName,
...
}: {
systemd.network.enable = true;
networking = {
hostName = nodeName;
useDHCP = lib.mkForce false;
useNetworkd = true;
dhcpcd.enable = false;
# Rename known network interfaces from local secrets
renameInterfacesByMac =
lib.mapAttrs (_: v: v.mac)
(config.repo.secrets.local.networking.interfaces or {});
};
}


@ -0,0 +1,70 @@
{
config,
lib,
...
}: {
networking.nftables = {
stopRuleset = lib.mkDefault ''
table inet filter {
chain input {
type filter hook input priority filter; policy drop;
ct state invalid drop
ct state {established, related} accept
iifname lo accept
meta l4proto ipv6-icmp accept
meta l4proto icmp accept
tcp dport ${toString (lib.head config.services.openssh.ports)} accept
}
chain forward {
type filter hook forward priority filter; policy drop;
}
chain output {
type filter hook output priority filter; policy accept;
}
}
'';
firewall = {
enable = true;
# TODO mkForce nftables
zones = lib.mkForce {
local.localZone = true;
};
rules = lib.mkForce {
icmp = {
early = true;
after = ["ct"];
from = "all";
to = ["local"];
extraLines = [
"ip6 nexthdr icmpv6 icmpv6 type { echo-request, destination-unreachable, packet-too-big, time-exceeded, parameter-problem, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert } accept"
"ip protocol icmp icmp type { echo-request, destination-unreachable, router-advertisement, time-exceeded, parameter-problem } accept"
#"ip6 saddr fe80::/10 ip6 daddr fe80::/10 udp dport 546 accept" # (dhcpv6)
];
};
ssh = {
early = true;
after = ["ct"];
from = "all";
to = ["local"];
allowedTCPPorts = config.services.openssh.ports;
};
untrusted-to-local = {
from = ["untrusted"];
to = ["local"];
inherit
(config.networking.firewall)
allowedTCPPorts
allowedUDPPorts
;
};
};
};
};
}

modules/config/nix.nix Normal file

@ -0,0 +1,58 @@
{
inputs,
pkgs,
stateVersion,
...
}: {
environment.etc."nixos/configuration.nix".source = pkgs.writeText "configuration.nix" ''
assert builtins.trace "This is a dummy config, use colmena!" false;
{ }
'';
nix = {
settings = {
auto-optimise-store = true;
allowed-users = ["@wheel"];
trusted-users = ["root" "@wheel"];
substituters = [
"https://nix-config.cachix.org"
"https://nix-community.cachix.org"
];
trusted-public-keys = [
"nix-config.cachix.org-1:Vd6raEuldeIZpttVQfrUbLvXJHzzzkS0pezXCVVjDG4="
"nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs="
];
cores = 0;
max-jobs = "auto";
};
daemonCPUSchedPolicy = "batch";
daemonIOSchedPriority = 5;
distributedBuilds = true;
extraOptions = ''
builders-use-substitutes = true
experimental-features = nix-command flakes
flake-registry = /etc/nix/registry.json
'';
nixPath = ["nixpkgs=/run/current-system/nixpkgs"];
optimise.automatic = true;
gc = {
automatic = true;
dates = "monthly";
options = "--delete-older-than 90d";
};
# Define global flakes for this system
registry = {
nixpkgs.flake = inputs.nixpkgs;
p.flake = inputs.nixpkgs;
pkgs.flake = inputs.nixpkgs;
templates.flake = inputs.templates;
};
};
system = {
extraSystemBuilderCmds = ''
ln -sv ${pkgs.path} $out/nixpkgs
'';
inherit stateVersion;
};
}


@ -0,0 +1,76 @@
{
config,
lib,
...
}: {
services.resolved = {
enable = true;
dnssec = "allow-downgrade";
fallbackDns = [
"1.1.1.1"
"2606:4700:4700::1111"
"8.8.8.8"
"2001:4860:4860::8844"
];
llmnr = "false";
extraConfig = ''
Domains=~.
MulticastDNS=true
'';
};
system.nssDatabases.hosts = lib.mkMerge [
(lib.mkBefore ["mdns_minimal [NOTFOUND=return]"])
(lib.mkAfter ["mdns"])
];
# Open port 5353 for any interfaces that have MulticastDNS enabled
networking.nftables.firewall = let
# Determine all networks that have MulticastDNS enabled
networksWithMulticast =
lib.filter
(n: config.systemd.network.networks.${n}.networkConfig.MulticastDNS or false)
(lib.attrNames config.systemd.network.networks);
# Determine all known mac addresses and the corresponding link name
# based on the renameInterfacesByMac option.
knownMacs =
lib.mapAttrs'
(k: v: lib.nameValuePair v k)
config.networking.renameInterfacesByMac;
# A helper that returns the link name for the given mac address,
# or null if it doesn't exist or the given mac was null.
linkNameFor = mac:
if mac == null
then null
else knownMacs.${mac} or null;
# Calls the given function for each network that has MulticastDNS enabled,
# and collects all non-null values.
mapNetworks = f: lib.filter (v: v != null) (map f networksWithMulticast);
# All interfaces on which MulticastDNS is used
mdnsInterfaces = lib.unique (
# For each network that is matched by MAC, lookup the link name
# and if map the definition name to the link name.
mapNetworks (x: linkNameFor (config.systemd.network.networks.${x}.matchConfig.MACAddress or null))
# For each network that is matched by name, map the definition
# name to the link name.
++ mapNetworks (x: config.systemd.network.networks.${x}.matchConfig.Name or null)
);
in
lib.mkIf (mdnsInterfaces != []) {
# TODO mkForce nftables
zones = lib.mkForce {
mdns.interfaces = mdnsInterfaces;
};
rules = lib.mkForce {
mdns-to-local = {
from = ["mdns"];
to = ["local"];
allowedUDPPorts = [5353];
};
};
};
}
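
A hypothetical network definition that the logic above would pick up, opening udp/5353 on its link (link and unit names are made up):

{
  systemd.network.networks."20-lan" = {
    matchConfig.Name = "lan01";
    networkConfig = {
      DHCP = "yes";
      # MulticastDNS = true here is what makes the rule above open udp/5353 on this link.
      MulticastDNS = true;
    };
  };
}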


@ -0,0 +1,64 @@
{
inputs,
lib,
nodePath,
...
}: {
# Define local repo secrets
repo.secretFiles = let
local = nodePath + "/secrets/local.nix.age";
in
{
global = ../../secrets/global.nix.age;
}
// lib.optionalAttrs (nodePath != null && lib.pathExists local) {inherit local;};
# Setup secret rekeying parameters
age.rekey = {
inherit
(inputs.self.secretsConfig)
masterIdentities
extraEncryptionPubkeys
;
# This is technically impure, but intended. We need to rekey on the
# current system due to yubikey availability.
forceRekeyOnSystem = builtins.extraBuiltins.unsafeCurrentSystem;
hostPubkey = let
pubkeyPath =
if nodePath == null
then null
else nodePath + "/secrets/host.pub";
in
lib.mkIf (pubkeyPath != null && lib.pathExists pubkeyPath) pubkeyPath;
};
age.generators.dhparams.script = {pkgs, ...}: "${pkgs.openssl}/bin/openssl dhparam 4096";
age.generators.basic-auth.script = {
pkgs,
lib,
decrypt,
deps,
...
}:
lib.flip lib.concatMapStrings deps ({
name,
host,
file,
}: ''
echo " -> Aggregating "${lib.escapeShellArg host}":"${lib.escapeShellArg name}"" >&2
${decrypt} ${lib.escapeShellArg file} \
| ${pkgs.apacheHttpd}/bin/htpasswd -niBC 12 ${lib.escapeShellArg host}"+"${lib.escapeShellArg name} \
|| die "Failure while aggregating basic auth hashes"
'');
# Just before switching, remove the agenix directory if it exists.
# This can happen when a secret is used in the initrd because it will
# then be copied to the initramfs under the same path. This materializes
# /run/agenix as a directory which will cause issues when the actual system tries
# to create a link called /run/agenix. Agenix should probably fail in this case,
# but doesn't and instead puts the generation link into the existing directory.
# TODO See https://github.com/ryantm/agenix/pull/187.
system.activationScripts.removeAgenixLink.text = "[[ ! -L /run/agenix ]] && [[ -d /run/agenix ]] && rm -rf /run/agenix";
system.activationScripts.agenixNewGeneration.deps = ["removeAgenixLink"];
}

modules/config/ssh.nix Normal file

@ -0,0 +1,17 @@
{lib, ...}: {
services.openssh = {
enable = true;
authorizedKeysFiles = lib.mkForce ["/etc/ssh/authorized_keys.d/%u"];
settings = {
PasswordAuthentication = false;
KbdInteractiveAuthentication = false;
PermitRootLogin = "yes";
};
hostKeys = [
{
path = "/etc/ssh/ssh_host_ed25519_key";
type = "ed25519";
}
];
};
}

modules/config/system.nix Normal file

@ -0,0 +1,10 @@
{lib, ...}: {
# Disable sudo, which is entirely unnecessary.
security.sudo.enable = false;
time.timeZone = lib.mkDefault "Europe/Berlin";
i18n.defaultLocale = "C.UTF-8";
console.keyMap = "de-latin1-nodeadkeys";
systemd.enableUnifiedCgroupHierarchy = true;
}

modules/config/users.nix Normal file

@ -0,0 +1,27 @@
{
users.mutableUsers = false;
users.deterministicIds = let
uidGid = id: {
uid = id;
gid = id;
};
in {
systemd-oom = uidGid 999;
systemd-coredump = uidGid 998;
sshd = uidGid 997;
nscd = uidGid 996;
polkituser = uidGid 995;
microvm = uidGid 994;
promtail = uidGid 993;
grafana = uidGid 992;
acme = uidGid 991;
kanidm = uidGid 990;
loki = uidGid 989;
vaultwarden = uidGid 988;
oauth2_proxy = uidGid 987;
influxdb2 = uidGid 986;
telegraf = uidGid 985;
rtkit = uidGid 984;
};
}

modules/config/xdg.nix Normal file

@ -0,0 +1,12 @@
{
environment.etc."xdg/user-dirs.defaults".text = ''
DESKTOP=tmp
DOWNLOAD=download
TEMPLATES=tmp
PUBLICSHARE=opt
DOCUMENTS=documents
MUSIC=music
PICTURES=pictures
VIDEOS=tmp
'';
}

modules/default.nix Normal file

@ -0,0 +1,42 @@
{
imports = [
../users/root
./config/boot.nix
./config/home-manager.nix
./config/impermanence.nix
./config/inputrc.nix
./config/issue.nix
./config/lib.nix
./config/microvms.nix
./config/net.nix
./config/nftables.nix
./config/nix.nix
./config/resolved.nix
./config/secrets.nix
./config/ssh.nix
./config/system.nix
./config/users.nix
./config/xdg.nix
./meta/microvms.nix
./meta/nginx.nix
./meta/oauth2-proxy.nix
./meta/promtail.nix
./meta/telegraf.nix
./meta/wireguard-proxy.nix
./meta/wireguard.nix
./networking/hostapd.nix
./networking/interface-naming.nix
./networking/provided-domains.nix
./repo/distributed-config.nix
./repo/meta.nix
./repo/secrets.nix
./security/acme-wildcard.nix
./system/deteministic-ids.nix
];
}


@ -1,136 +0,0 @@
{
config,
lib,
nodePath,
...
}: let
inherit
(lib)
assertMsg
filter
flip
genAttrs
hasInfix
head
mapAttrs
mapAttrs'
mdDoc
mkIf
mkOption
nameValuePair
optionals
removeSuffix
types
;
in {
options.extra = {
acme.wildcardDomains = mkOption {
default = [];
example = ["example.org"];
type = types.listOf types.str;
description = mdDoc ''
All domains for which a wildcard certificate will be generated.
This will define the given `security.acme.certs` and set `extraDomainNames` correctly,
but does not fill any options such as credentials or dnsProvider. These have to be set
individually for each cert by the user or via `security.acme.defaults`.
'';
};
};
options.services.nginx.virtualHosts = mkOption {
type = types.attrsOf (types.submodule ({config, ...}: {
options.recommendedSecurityHeaders = mkOption {
type = types.bool;
default = true;
description = mdDoc ''Whether to add additional security headers to the "/" location.'';
};
config = mkIf config.recommendedSecurityHeaders {
locations."/".extraConfig = ''
# Enable HTTP Strict Transport Security (HSTS)
add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload";
# Minimize information leaked to other domains
add_header Referrer-Policy "origin-when-cross-origin";
add_header X-XSS-Protection "1; mode=block";
add_header X-Frame-Options "DENY";
add_header X-Content-Type-Options "nosniff";
'';
};
}));
};
config = {
lib.extra = {
# For a given domain, this searches for a matching wildcard acme domain that
# would include the given domain. If no such domain is defined in
# extra.acme.wildcardDomains, an assertion is triggered.
matchingWildcardCert = domain: let
matchingCerts =
filter
(x: !hasInfix "." (removeSuffix ".${x}" domain))
config.extra.acme.wildcardDomains;
in
assert assertMsg (matchingCerts != []) "No wildcard certificate was defined that matches ${domain}";
head matchingCerts;
};
security.acme.certs = genAttrs config.extra.acme.wildcardDomains (domain: {
extraDomainNames = ["*.${domain}"];
});
age.secrets = mkIf config.services.nginx.enable {
"dhparams.pem" = {
rekeyFile = nodePath + "/secrets/dhparams.pem.age";
generator = "dhparams";
mode = "440";
group = "nginx";
};
};
# Sensible defaults for nginx
services.nginx = mkIf config.services.nginx.enable {
recommendedBrotliSettings = true;
recommendedGzipSettings = true;
recommendedOptimisation = true;
recommendedProxySettings = true;
recommendedTlsSettings = true;
# SSL config
sslCiphers = "EECDH+AESGCM:EDH+AESGCM:!aNULL";
sslDhparam = config.age.secrets."dhparams.pem".path;
commonHttpConfig = ''
log_format json_combined escape=json '{'
'"time": $msec,'
'"remote_addr":"$remote_addr",'
'"status":$status,'
'"method":"$request_method",'
'"host":"$host",'
'"uri":"$request_uri",'
'"request_size":$request_length,'
'"response_size":$body_bytes_sent,'
'"response_time":$request_time,'
'"referrer":"$http_referer",'
'"user_agent":"$http_user_agent"'
'}';
error_log syslog:server=unix:/dev/log,nohostname;
access_log syslog:server=unix:/dev/log,nohostname json_combined;
ssl_ecdh_curve secp384r1;
'';
virtualHosts.localhost = {
locations."= /nginx_status".extraConfig = ''
allow 127.0.0.0/8;
deny all;
stub_status;
'';
};
};
networking.firewall.allowedTCPPorts = optionals config.services.nginx.enable [80 443];
services.telegraf.extraConfig.inputs = mkIf config.services.nginx.enable {
nginx.urls = ["http://localhost/nginx_status"];
};
};
}


@ -35,8 +35,8 @@
;
parentConfig = config;
cfg = config.extra.microvms;
inherit (config.extra.microvms) vms;
cfg = config.meta.microvms;
inherit (config.meta.microvms) vms;
inherit (config.lib) net;
# Configuration for each microvm
@ -94,7 +94,7 @@
nodes = mkMerge config.microvm.vms.${vmName}.config.options.nodes.definitions;
microvm.vms.${vmName} = let
node = import ../nix/generate-node.nix inputs vmCfg.nodeName {
node = import ../../nix/generate-node.nix inputs vmCfg.nodeName {
inherit (vmCfg) system configPath;
};
mac = (net.mac.assignMacs "02:01:27:00:00:00" 24 [] (attrNames vms)).${vmName};
@ -165,7 +165,7 @@
gc.automatic = mkForce false;
};
extra.networking.renameInterfacesByMac.${vmCfg.networking.mainLinkName} = mac;
networking.renameInterfacesByMac.${vmCfg.networking.mainLinkName} = mac;
systemd.network.networks =
{
@ -186,7 +186,7 @@
# would not come online if the private key wasn't rekeyed yet).
# FIXME ideally this would be conditional at runtime if the
# agenix activation had an error, but this is not trivial.
${parentConfig.extra.wireguard."${nodeName}-local-vms".unitConfName} = {
${parentConfig.meta.wireguard."${nodeName}-local-vms".unitConfName} = {
linkConfig.RequiredForOnline = "no";
};
};
@ -198,7 +198,7 @@
};
};
extra.wireguard = mkIf vmCfg.localWireguard {
meta.wireguard = mkIf vmCfg.localWireguard {
"${nodeName}-local-vms" = {
server = {
host =
@ -222,7 +222,7 @@ in {
{microvm.host.enable = vms != {};}
];
options.extra.microvms = {
options.meta.microvms = {
commonImports = mkOption {
type = types.listOf types.unspecified;
default = [];
@ -362,7 +362,7 @@ in {
config = mkIf (vms != {}) (
{
# Define a local wireguard server to communicate with vms securely
extra.wireguard = mkIf (any (x: x.localWireguard) (attrValues vms)) {
meta.wireguard = mkIf (any (x: x.localWireguard) (attrValues vms)) {
"${nodeName}-local-vms" = {
server = {
host =

modules/meta/nginx.nix Normal file

@ -0,0 +1,79 @@
{
config,
lib,
nodePath,
...
}: let
inherit
(lib)
mdDoc
mkIf
mkOption
types
;
in {
options.services.nginx.virtualHosts = mkOption {
type = types.attrsOf (types.submodule ({config, ...}: {
options.recommendedSecurityHeaders = mkOption {
type = types.bool;
default = true;
description = mdDoc ''Whether to add additional security headers to the "/" location.'';
};
config = mkIf config.recommendedSecurityHeaders {
locations."/".extraConfig = ''
# Enable HTTP Strict Transport Security (HSTS)
add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload";
# Minimize information leaked to other domains
add_header Referrer-Policy "origin-when-cross-origin";
add_header X-XSS-Protection "1; mode=block";
add_header X-Frame-Options "DENY";
add_header X-Content-Type-Options "nosniff";
'';
};
}));
};
config = mkIf config.services.nginx.enable {
age.secrets."dhparams.pem" = {
rekeyFile = nodePath + "/secrets/dhparams.pem.age";
generator = "dhparams";
mode = "440";
group = "nginx";
};
# Sensible defaults for nginx
services.nginx = {
recommendedBrotliSettings = true;
recommendedGzipSettings = true;
recommendedOptimisation = true;
recommendedProxySettings = true;
recommendedTlsSettings = true;
# SSL config
sslCiphers = "EECDH+AESGCM:EDH+AESGCM:!aNULL";
sslDhparam = config.age.secrets."dhparams.pem".path;
commonHttpConfig = ''
log_format json_combined escape=json '{'
'"time": $msec,'
'"remote_addr":"$remote_addr",'
'"status":$status,'
'"method":"$request_method",'
'"host":"$host",'
'"uri":"$request_uri",'
'"request_size":$request_length,'
'"response_size":$body_bytes_sent,'
'"response_time":$request_time,'
'"referrer":"$http_referer",'
'"user_agent":"$http_user_agent"'
'}';
error_log syslog:server=unix:/dev/log,nohostname;
access_log syslog:server=unix:/dev/log,nohostname json_combined;
ssl_ecdh_curve secp384r1;
'';
};
networking.firewall.allowedTCPPorts = [80 443];
};
}


@ -17,9 +17,9 @@
types
;
cfg = config.extra.oauth2_proxy;
cfg = config.meta.oauth2_proxy;
in {
options.extra.oauth2_proxy = {
options.meta.oauth2_proxy = {
enable = mkEnableOption (mdDoc "oauth2 proxy");
cookieDomain = mkOption {
@ -141,7 +141,7 @@ in {
virtualHosts.${cfg.portalDomain} = {
forceSSL = true;
useACMEHost = config.lib.extra.matchingWildcardCert cfg.portalDomain;
useACMEWildcardHost = true;
oauth2.enable = true;
locations."/".proxyPass = "http://oauth2_proxy";
};


@ -15,9 +15,9 @@
types
;
cfg = config.extra.promtail;
cfg = config.meta.promtail;
in {
options.extra.promtail = {
options.meta.promtail = {
enable = mkEnableOption (mdDoc "promtail to push logs to a loki instance.");
proxy = mkOption {
type = types.str;
@ -50,7 +50,7 @@ in {
{
basic_auth.username = "${nodeName}+promtail-loki-basic-auth-password";
basic_auth.password_file = config.age.secrets.promtail-loki-basic-auth-password.path;
url = "https://${nodes.${cfg.proxy}.config.providedDomains.loki}/loki/api/v1/push";
url = "https://${nodes.${cfg.proxy}.config.networking.providedDomains.loki}/loki/api/v1/push";
}
];


@ -17,9 +17,9 @@
types
;
cfg = config.extra.telegraf;
cfg = config.meta.telegraf;
in {
options.extra.telegraf = {
options.meta.telegraf = {
enable = mkEnableOption (mdDoc "telegraf to push metrics to influx.");
influxdb2 = {
domain = mkOption {
@ -111,12 +111,23 @@ in {
path_smartctl = "${pkgs.smartmontools}/bin/smartctl";
use_sudo = true;
};
}
// optionalAttrs config.services.nginx.enable {
nginx.urls = ["http://localhost/nginx_status"];
# TODO } // optionalAttrs config.services.iwd.enable {
# TODO wireless = { };
};
};
};
services.nginx.virtualHosts = mkIf config.services.telegraf.enable {
localhost.locations."= /nginx_status".extraConfig = ''
allow 127.0.0.0/8;
deny all;
stub_status;
'';
};
systemd.services.telegraf = {
path = [
"/run/wrappers"


@ -0,0 +1,81 @@
{
config,
lib,
nodes,
...
}: let
inherit
(lib)
attrNames
flip
mdDoc
mkForce
mkIf
mkMerge
mkOption
types
;
cfg = config.meta.wireguard-proxy;
in {
options.meta.wireguard-proxy = mkOption {
default = {};
description = mdDoc ''
Each entry here will set up a wireguard network that connects via the
given node and adds appropriate firewall zones. There will be a zone for
the interface and one for the proxy server specifically. A corresponding
rule `''${name}-to-local` will be created to easily expose services to the proxy.
'';
type = types.attrsOf (types.submodule ({name, ...}: {
options = {
nicName = mkOption {
type = types.str;
default = "proxy-${name}";
description = mdDoc "The name for the created wireguard network and its interface";
};
allowedTCPPorts = mkOption {
type = types.listOf types.int;
default = [];
description = mdDoc "Convenience option to allow incoming TCP connections from the proxy server (just the server, not the entire network).";
};
allowedUDPPorts = mkOption {
type = types.listOf types.int;
default = [];
description = mdDoc "Convenience option to allow incoming UDP connections from the proxy server (just the server, not the entire network).";
};
};
}));
};
config = mkIf (cfg != {}) {
meta.wireguard = mkMerge (flip map (attrNames cfg) (proxy: {
${cfg.${proxy}.nicName}.client.via = proxy;
}));
networking.nftables.firewall = mkMerge (flip map (attrNames cfg) (proxy: {
zones = mkForce {
# Parent zone for the whole interface
${cfg.${proxy}.nicName}.interfaces = [cfg.${proxy}.nicName];
# Subzone to specifically target the proxy host
${proxy} = {
parent = cfg.${proxy}.nicName;
ipv4Addresses = [nodes.${proxy}.config.meta.wireguard.${cfg.${proxy}.nicName}.ipv4];
ipv6Addresses = [nodes.${proxy}.config.meta.wireguard.${cfg.${proxy}.nicName}.ipv6];
};
};
rules = mkForce {
"${proxy}-to-local" = {
from = [proxy];
to = ["local"];
inherit
(cfg.${proxy})
allowedTCPPorts
allowedUDPPorts
;
};
};
}));
};
}


@ -44,7 +44,7 @@
;
inherit (config.lib) net;
cfg = config.extra.wireguard;
cfg = config.meta.wireguard;
configForNetwork = wgName: wgCfg: let
inherit
@ -258,7 +258,7 @@
};
};
in {
options.extra.wireguard = mkOption {
options.meta.wireguard = mkOption {
default = {};
description = "Configures wireguard networks via systemd-networkd.";
type = types.lazyAttrsOf (types.submodule ({


@ -1193,4 +1193,5 @@ in {
};
};
};
disabledModules = ["services/networking/hostapd.nix"];
}


@ -1,3 +1,4 @@
# Provides an option to easily rename interfaces by their mac addresses.
{
config,
extraLib,
@ -15,7 +16,7 @@
types
;
cfg = config.extra.networking.renameInterfacesByMac;
cfg = config.networking.renameInterfacesByMac;
interfaceNamesUdevRules = pkgs.writeTextFile {
name = "interface-names-udev-rules";
@ -25,7 +26,7 @@
destination = "/etc/udev/rules.d/01-interface-names.rules";
};
in {
options.extra.networking.renameInterfacesByMac = mkOption {
options.networking.renameInterfacesByMac = mkOption {
default = {};
example = {lan = "11:22:33:44:55:66";};
description = "Allows naming of network interfaces based on their physical address";


@ -1,5 +1,5 @@
{lib, ...}: {
options.providedDomains = lib.mkOption {
options.networking.providedDomains = lib.mkOption {
type = lib.types.attrsOf lib.types.str;
default = {};
description = "Registry of domains that this host 'provides' (that refer to this host with some functionality). For easy cross-node referencing.";


@ -0,0 +1,7 @@
{
boot.loader.grub = {
enable = true;
efiSupport = false;
configurationLimit = 32;
};
}


@ -0,0 +1,7 @@
{
boot.loader = {
systemd-boot.enable = true;
systemd-boot.configurationLimit = 32;
efi.canTouchEfiVariables = true;
};
}


@ -0,0 +1,9 @@
{
imports = [
./documentation.nix
./yubikey.nix
];
environment.enableDebugInfo = true;
repo.defineNixExtraBuiltins = true;
}


@ -0,0 +1,12 @@
{
lib,
pkgs,
...
}: {
environment.systemPackages = with pkgs; [man-pages];
documentation = {
dev.enable = true;
man.enable = true;
info.enable = lib.mkForce false;
};
}


@ -0,0 +1,5 @@
{pkgs, ...}: {
environment.systemPackages = with pkgs; [yubikey-manager yubikey-personalization age-plugin-yubikey];
services.udev.packages = with pkgs; [yubikey-personalization libu2f-host];
services.pcscd.enable = true;
}


@ -0,0 +1,6 @@
{pkgs, ...}: {
imports = [
./fonts.nix
./wayland.nix
];
}


@ -0,0 +1,53 @@
{pkgs, ...}: {
fonts = {
enableDefaultFonts = false;
enableGhostscriptFonts = false;
fontDir.enable = false;
fontconfig = {
defaultFonts = {
sansSerif = ["IBM Plex Sans"];
serif = ["IBM Plex Sans"];
monospace = ["FiraCode Nerd Font"];
emoji = ["Noto Color Emoji"];
};
localConf = ''
<?xml version="1.0"?>
<!DOCTYPE fontconfig SYSTEM "fonts.dtd">
<fontconfig>
<alias binding="weak">
<family>monospace</family>
<prefer>
<family>emoji</family>
</prefer>
</alias>
<alias binding="weak">
<family>sans-serif</family>
<prefer>
<family>emoji</family>
</prefer>
</alias>
<alias binding="weak">
<family>serif</family>
<prefer>
<family>emoji</family>
</prefer>
</alias>
</fontconfig>
'';
};
fonts = with pkgs; [
(nerdfonts.override {fonts = ["FiraCode"];})
ibm-plex
dejavu_fonts
unifont
freefont_ttf
gyre-fonts # TrueType substitutes for standard PostScript fonts
liberation_ttf
noto-fonts
noto-fonts-cjk-sans
noto-fonts-cjk-serif
noto-fonts-emoji
noto-fonts-extra
];
};
}


@ -0,0 +1,10 @@
{pkgs, ...}: {
environment.systemPackages = with pkgs; [wayland];
services.dbus.enable = true;
xdg.portal = {
enable = true;
wlr.enable = true;
# gtk portal needed to make gtk apps happy
extraPortals = with pkgs; [xdg-desktop-portal-gtk];
};
}


@ -0,0 +1,26 @@
{pkgs, ...}: {
environment.systemPackages = with pkgs; [bluetuith];
hardware.bluetooth = {
enable = true;
powerOnBoot = true;
disabledPlugins = ["sap"];
settings = {
General = {
FastConnectable = "true";
JustWorksRepairing = "always";
MultiProfile = "multiple";
};
};
};
hardware.pulseaudio = {
package = pkgs.pulseaudio.override {bluetoothSupport = true;};
extraConfig = ''
load-module module-bluetooth-discover
load-module module-bluetooth-policy
load-module module-switch-on-connect
'';
extraModules = with pkgs; [pulseaudio-modules-bt];
};
}


@ -0,0 +1,3 @@
{
boot.initrd.availableKernelModules = ["virtio_pci" "virtio_net" "virtio_scsi" "virtio_blk"];
}


@ -0,0 +1,3 @@
{
powerManagement.cpuFreqGovernor = "powersave";
}


@ -0,0 +1,16 @@
{
boot.blacklistedKernelModules = ["nouveau"];
hardware = {
nvidia = {
modesetting.enable = true;
nvidiaPersistenced = true;
};
opengl = {
enable = true;
driSupport32Bit = true;
};
};
services.xserver.videoDrivers = ["nvidia"];
}


@ -0,0 +1,33 @@
{
lib,
config,
nixos-hardware,
pkgs,
...
}: {
imports = [
nixos-hardware.common-pc-ssd
./physical.nix
];
boot.initrd.availableKernelModules = [
"usbhid"
"usb_storage"
# Ethernet
"dwmac_generic"
"dwmac_meson8b"
"cfg80211"
# HDMI
"snd_soc_meson_g12a_tohdmitx"
"snd_soc_meson_g12a_toacodec"
"mdio_mux_meson_g12a"
"dw_hdmi"
"meson_vdec"
"meson_dw_hdmi"
"meson_drm"
"meson_rng"
"drm"
"display_connector"
];
boot.kernelParams = ["console=ttyAML0,115200n8" "console=tty0"];
}


@ -0,0 +1,13 @@
# Configuration for actual physical machines
{config, ...}: {
hardware = {
enableRedistributableFirmware = true;
enableAllFirmware = true;
};
services = {
fwupd.enable = true;
smartd.enable = true;
thermald.enable = builtins.elem config.nixpkgs.system ["x86_64-linux"];
};
}


@ -0,0 +1,40 @@
{
config,
pkgs,
nodePath,
...
}: {
age.secrets.initrd_host_ed25519_key = {
rekeyFile = nodePath + "/secrets/initrd_host_ed25519_key.age";
# Generate only an ssh-ed25519 private key
generator.script = {
pkgs,
lib,
...
}: ''
(exec 3>&1; ${pkgs.openssh}/bin/ssh-keygen -q -t ed25519 -N "" -f /proc/self/fd/3 <<<y >/dev/null 2>&1)
'';
};
boot.initrd.network.enable = true;
boot.initrd.network.ssh = {
enable = true;
port = 4;
hostKeys = [config.age.secrets.initrd_host_ed25519_key.path];
};
# Make sure that there is always a valid initrd hostkey available that can be installed into
# the initrd. When bootstrapping a system (or re-installing), agenix cannot succeed in decrypting
# whatever is given, since the correct hostkey doesn't even exist yet. We still require
# a valid hostkey to be available so that the initrd can be generated successfully.
# The correct initrd host-key will be installed with the next update after the host is booted
# for the first time and the secrets have been rekeyed for the new host identity.
system.activationScripts.agenixEnsureInitrdHostkey = {
text = ''
[[ -e ${config.age.secrets.initrd_host_ed25519_key.path} ]] \
|| ${pkgs.openssh}/bin/ssh-keygen -t ed25519 -N "" -f ${config.age.secrets.initrd_host_ed25519_key.path}
'';
deps = ["agenixInstall"];
};
system.activationScripts.agenixChown.deps = ["agenixEnsureInitrdHostkey"];
}


@ -0,0 +1,21 @@
{pkgs, ...}: {
systemd.network.wait-online.anyInterface = true;
services = {
tlp.enable = true;
physlock.enable = true;
logind = {
lidSwitch = "ignore";
lidSwitchDocked = "ignore";
lidSwitchExternalPower = "ignore";
extraConfig = ''
HandlePowerKey=suspend
HandleSuspendKey=suspend
HandleHibernateKey=suspend
PowerKeyIgnoreInhibited=yes
SuspendKeyIgnoreInhibited=yes
HibernateKeyIgnoreInhibited=yes
'';
};
};
}


@ -0,0 +1,35 @@
{
lib,
pkgs,
...
}: {
environment.systemPackages = with pkgs; [pulseaudio pulsemixer];
sound.enable = false; # ALSA
hardware.pulseaudio.enable = lib.mkForce false;
security.rtkit.enable = true;
services.pipewire = {
enable = true;
alsa.enable = true;
jack.enable = true;
pulse.enable = true;
media-session.enable = false;
wireplumber.enable = true;
config = {
pipewire."context.properties"."default.clock.allowed-rates" = [
44100
48000
88200
96000
176400
192000
358000
384000
716000
768000
];
pipewire-pulse."stream.properties"."resample.quality" = 15;
client."stream.properties"."resample.quality" = 15;
client-rt."stream.properties"."resample.quality" = 15;
};
};
}

modules/optional/zfs.nix Normal file

@ -0,0 +1,30 @@
{
config,
lib,
pkgs,
...
}: {
boot.supportedFilesystems = ["zfs"];
boot.kernelPackages = config.boot.zfs.package.latestCompatibleLinuxPackages;
# The root pool should never be imported forcefully.
# Failure to import is important to notice!
boot.zfs.forceImportRoot = false;
environment.systemPackages = with pkgs; [zfs];
services.zfs = {
autoScrub = {
enable = true;
interval = "weekly";
};
trim = {
enable = true;
interval = "weekly";
};
};
services.telegraf.extraConfig.inputs = lib.mkIf config.services.telegraf.enable {
zfs.poolMetrics = true;
};
}


@ -1,25 +0,0 @@
{
lib,
nodes,
...
}: {
extra.wireguard.proxy-sentinel.client.via = "sentinel";
networking.nftables.firewall = {
zones = lib.mkForce {
proxy-sentinel.interfaces = ["proxy-sentinel"];
sentinel = {
parent = "proxy-sentinel";
ipv4Addresses = [nodes.sentinel.config.extra.wireguard.proxy-sentinel.ipv4];
ipv6Addresses = [nodes.sentinel.config.extra.wireguard.proxy-sentinel.ipv6];
};
};
rules = lib.mkForce {
sentinel-to-local = {
from = ["sentinel"];
to = ["local"];
};
};
};
}
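
The boilerplate removed above is what the new meta.wireguard-proxy module generates from a single entry; a rough sketch of the equivalent (the port is only an example):

{
  meta.wireguard-proxy.sentinel = {
    # nicName defaults to "proxy-sentinel"; optionally expose services to the proxy host:
    allowedTCPPorts = [443];
  };
}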


@ -11,7 +11,6 @@
attrNames
concatMap
elem
filter
mdDoc
mkOption
mkOptionType
@ -37,7 +36,7 @@ in {
allNodes = attrNames colmenaNodes;
isColmenaNode = elem nodeName allNodes;
foreignConfigs = concatMap (n: colmenaNodes.${n}.config.nodes.${nodeName} or []) allNodes;
toplevelAttrs = ["age" "providedDomains" "networking" "systemd" "services"];
toplevelAttrs = ["age" "networking" "systemd" "services"];
in
optionalAttrs isColmenaNode (mergeToplevelConfigs toplevelAttrs (
foreignConfigs

modules/repo/meta.nix Normal file

@ -0,0 +1,19 @@
{}
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way
# TODO define special args in a more documented and readOnly accessible way


@ -88,7 +88,7 @@ in {
# at least via its parent folder so it can access relative files.
nix.extraOptions = mkIf cfg.defineNixExtraBuiltins ''
plugin-files = ${pkgs.nix-plugins}/lib/nix/plugins
extra-builtins-file = ${../nix}/extra-builtins.nix
extra-builtins-file = ${inputs.self.outPath}/nix/extra-builtins.nix
'';
};
}


@ -0,0 +1,59 @@
{
config,
lib,
...
}: let
inherit
(lib)
assertMsg
filter
genAttrs
hasInfix
head
mdDoc
mkIf
mkOption
removeSuffix
types
;
in {
options.security.acme.wildcardDomains = mkOption {
default = [];
example = ["example.org"];
type = types.listOf types.str;
description = mdDoc ''
All domains for which a wildcard certificate will be generated.
This will define the given `security.acme.certs` and set `extraDomainNames` correctly,
but does not fill any options such as credentials or dnsProvider. These have to be set
individually for each cert by the user or via `security.acme.defaults`.
'';
};
options.services.nginx.virtualHosts = mkOption {
type = types.attrsOf (types.submodule (submod: {
options.useACMEWildcardHost = mkOption {
type = types.bool;
default = false;
description = mdDoc ''Automatically set useACMEHost with the correct wildcard domain for the virtualHost's main domain.'';
};
config = let
# This retrieves all matching wildcard certs that would include
# the corresponding domain. If no such domain is defined in
# security.acme.wildcardDomains, an assertion is triggered.
domain = submod.config._module.args.name;
matchingCerts =
filter
(x: !hasInfix "." (removeSuffix ".${x}" domain))
config.security.acme.wildcardDomains;
in
mkIf submod.config.useACMEWildcardHost {
useACMEHost = assert assertMsg (matchingCerts != []) "No wildcard certificate was defined that matches ${domain}";
head matchingCerts;
};
}));
};
config.security.acme.certs = genAttrs config.security.acme.wildcardDomains (domain: {
extraDomainNames = ["*.${domain}"];
});
}
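
A minimal usage sketch of the new options (hypothetical domain; dnsProvider and credentials still have to be set, e.g. via security.acme.defaults):

{
  security.acme.wildcardDomains = ["example.org"];
  services.nginx.virtualHosts."grafana.example.org" = {
    forceSSL = true;
    # Resolves to useACMEHost = "example.org" via the matching logic above.
    useACMEWildcardHost = true;
  };
}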