refactor: move vm definitions to own files

oddlama 2023-06-11 14:42:20 +02:00
parent d4b7051091
commit 91306427ba
GPG key ID: 14EFE510775FE39A
10 changed files with 367 additions and 354 deletions

flake.lock (generated, 28 changed lines)

@@ -31,11 +31,11 @@
        ]
      },
      "locked": {
-       "lastModified": 1686343990,
-       "narHash": "sha256-/XkX73eAccg0l+2plLpDQHX4bl4sk2enSRwxUzuCcsc=",
+       "lastModified": 1686423850,
+       "narHash": "sha256-gssXzuMTkGxIcRnkri676GiOURsiBQ2eqLw7RX0kKmA=",
        "owner": "oddlama",
        "repo": "agenix-rekey",
-       "rev": "1dd5cf245e842c4b698b537a7097c417f2912efe",
+       "rev": "4c01269e9177e71f536dfe95a0cf21bedc58f797",
        "type": "github"
      },
      "original": {
@@ -210,11 +210,11 @@
        ]
      },
      "locked": {
-       "lastModified": 1686342731,
-       "narHash": "sha256-GwCwviXcc5nrewuFwtsrxys8srrZcI+m8hdIGOt+fHY=",
+       "lastModified": 1686391840,
+       "narHash": "sha256-5S0APl6Mfm6a37taHwvuf11UHnAX0+PnoWQbsYbMUnc=",
        "owner": "nix-community",
        "repo": "home-manager",
-       "rev": "0945875a2a20de314093b0f9d4d5448e9b4fdccb",
+       "rev": "0144ac418ef633bfc9dbd89b8c199ad3a617c59f",
        "type": "github"
      },
      "original": {
@@ -260,11 +260,11 @@
        ]
      },
      "locked": {
-       "lastModified": 1686244773,
-       "narHash": "sha256-AtS5u3Qfrvtd1OiaRugEWKymbm6kwd7DGYiCiV8x3/U=",
+       "lastModified": 1686444102,
+       "narHash": "sha256-6J+pkUauanh6qfvyD80ngYZSyUmdmngMaO4TFY2Z0OA=",
        "owner": "astro",
        "repo": "microvm.nix",
-       "rev": "8f759ded0bbc7728738b064516a879b36ee115b9",
+       "rev": "551239936a1c86479f6026658c4d1f1a3635d286",
        "type": "github"
      },
      "original": {
@@ -311,11 +311,11 @@
      },
      "nixos-hardware": {
        "locked": {
-         "lastModified": 1686217350,
-         "narHash": "sha256-Nb9b3m/GEK8jyFsYfUkXGsqj6rH05GgJ2QWcNNbK7dw=",
+         "lastModified": 1686452266,
+         "narHash": "sha256-zLKiX0iu6jZFeZDpR1gE6fNyMr8eiM8GLnj9SoUCjFs=",
          "owner": "NixOS",
          "repo": "nixos-hardware",
-         "rev": "e4b34b90f27696ec3965fa15dcbacc351293dc67",
+         "rev": "2a807ad6e8dc458db08588b78cc3c0f0ec4ff321",
          "type": "github"
        },
        "original": {
@@ -347,8 +347,8 @@
      },
      "nixpkgs": {
        "locked": {
-         "lastModified": 1686226982,
-         "narHash": "sha256-jsVFb4XxVdHvCA/OAuJA8tdPVf+jvm/eZLUZS4FoBGY=",
+         "lastModified": 1686319658,
+         "narHash": "sha256-Xb+zN3LkyOEKH9ydJEy3h0ch0MNEf0G7tmPbhy6r080=",
          "type": "git",
          "url": "file:///root/projects/nixpkgs-test"
        },


@@ -15,8 +15,8 @@
     ../../../modules/extra.nix
     ../../../modules/interface-naming.nix
     ../../../modules/microvms.nix
-    ../../../modules/wireguard.nix
     ../../../modules/repo.nix
+    ../../../modules/wireguard.nix
   ];
   home-manager = {


@@ -14,18 +14,6 @@
     mkForce
     ;
 in {
-  # TODO needed until https://github.com/NixOS/nixpkgs/issues/236146 is resolved
-  boot.initrd.systemd = {
-    services.systemd-networkd = {
-      before = ["initrd-switch-root.target"];
-      conflicts = ["initrd-switch-root.target"];
-    };
-    sockets.systemd-networkd = {
-      before = ["initrd-switch-root.target"];
-      conflicts = ["initrd-switch-root.target"];
-    };
-  };
   networking = {
     hostName = nodeName;
     useDHCP = mkForce false;


@@ -203,7 +203,7 @@
     lib.warnIf
     ((nInit + nHosts) > 0.3 * capacity)
     "assignIps: hash stability may be degraded since utilization is >30%"
-    (builtins.sort (a: b: a < b) hosts);
+    (builtins.sort builtins.lessThan hosts);
   # Generates a hash (i.e. offset value) for a given hostname
   hashElem = x:
     builtins.bitAnd (capacity - 1)
@@ -289,7 +289,7 @@
     lib.warnIf
     ((nInit + nHosts) > 0.3 * capacity)
     "assignMacs: hash stability may be degraded since utilization is >30%"
-    (builtins.sort (a: b: a < b) hosts);
+    (builtins.sort builtins.lessThan hosts);
   # Generates a hash (i.e. offset value) for a given hostname
   hashElem = x:
     builtins.bitAnd (capacity - 1)


@@ -21,7 +21,7 @@ in {
     dependencies = [
       # TODO allow defining these from other nodes like nodes.sentinel.age.secrets....dependenices = [];
       nodes.ward.config.age.secrets.loki-basic-auth-password
-      nodes.ward-test.config.age.secrets.loki-basic-auth-password
+      nodes.ward-grafana.config.age.secrets.loki-basic-auth-password
     ];
     script = {
       pkgs,
@@ -47,10 +47,10 @@
   };
   services.caddy = let
-    authDomain = nodes.ward-nginx.config.services.kanidm.serverSettings.domain;
-    authPort = lib.last (lib.splitString ":" nodes.ward-nginx.config.services.kanidm.serverSettings.bindaddress);
-    grafanaDomain = nodes.ward-test.config.services.grafana.settings.server.domain;
-    grafanaPort = toString nodes.ward-test.config.services.grafana.settings.server.http_port;
+    authDomain = nodes.ward-kanidm.config.services.kanidm.serverSettings.domain;
+    authPort = lib.last (lib.splitString ":" nodes.ward-kanidm.config.services.kanidm.serverSettings.bindaddress);
+    grafanaDomain = nodes.ward-grafana.config.services.grafana.settings.server.domain;
+    grafanaPort = toString nodes.ward-grafana.config.services.grafana.settings.server.http_port;
     lokiDomain = "loki.${personalDomain}";
     lokiPort = toString nodes.ward-loki.config.services.loki.configuration.server.http_listen_port;
   in {
@@ -120,13 +120,14 @@
     # TODO move subconfigs to the relevant hosts instead.
     # -> have something like merged config nodes.<name>....
+    # -> needs to be in a way that doesn't trigger infinite recursion
     virtualHosts.${authDomain} = {
       useACMEHost = config.lib.extra.matchingWildcardCert authDomain;
       extraConfig = ''
         encode zstd gzip
         reverse_proxy {
-          to https://${nodes.ward-nginx.config.extra.wireguard.proxy-sentinel.ipv4}:${authPort}
+          to https://${nodes.ward-kanidm.config.extra.wireguard.proxy-sentinel.ipv4}:${authPort}
           transport http {
             tls_insecure_skip_verify
           }
@@ -139,7 +140,7 @@
       extraConfig = ''
         encode zstd gzip
         reverse_proxy {
-          to http://${nodes.ward-test.config.extra.wireguard.proxy-sentinel.ipv4}:${grafanaPort}
+          to http://${nodes.ward-grafana.config.extra.wireguard.proxy-sentinel.ipv4}:${grafanaPort}
         }
       '';
     };


@@ -14,6 +14,5 @@
     ./net.nix
     ./acme.nix
     ./caddy.nix
-    #./nginx.nix
   ];
 }


@@ -1,17 +1,8 @@
 {
   config,
-  nodes,
   nixos-hardware,
-  nodeName,
-  pkgs,
   ...
-}: let
-  inherit (nodes.sentinel.config.repo.secrets.local) personalDomain;
-  authDomain = "auth.${personalDomain}";
-  grafanaDomain = "grafana.${personalDomain}";
-  lokiDomain = "loki.${personalDomain}";
-  lokiDir = "/var/lib/loki";
-in {
+}: {
   imports = [
     nixos-hardware.common-cpu-intel
     nixos-hardware.common-pc-ssd
@@ -26,318 +17,31 @@
     ./fs.nix
     ./net.nix
     ./promtail.nix
+    ./kanidm.nix
+    ./grafana.nix
+    ./loki.nix
   ];
   boot.initrd.availableKernelModules = ["xhci_pci" "ahci" "nvme" "usbhid" "usb_storage" "sd_mod" "sdhci_pci" "r8169"];
-  extra.microvms.vms = let
-    defineVm = {
-      system = "x86_64-linux";
-      autostart = true;
-      zfs = {
-        enable = true;
-        pool = "rpool";
-      };
-    };
-  in {
-    test = defineVm;
-    #ddclient = defineVm;
-    nginx = defineVm;
-    loki = defineVm;
-    #kanidm = defineVm;
-    #gitea/forgejo = defineVm;
-    #vaultwarden = defineVm;
-    #samba+wsdd = defineVm;
-    #fasten-health = defineVm;
-    #immich = defineVm;
-    #paperless = defineVm;
-    #radicale = defineVm;
-    #minecraft = defineVm;
-    #grafana
-    #loki
+  #ddclient = defineVm;
+  #kanidm = defineVm;
+  #gitea/forgejo = defineVm;
+  #vaultwarden = defineVm;
+  #samba+wsdd = defineVm;
+  #fasten-health = defineVm;
+  #immich = defineVm;
+  #paperless = defineVm;
+  #radicale = defineVm;
+  #minecraft = defineVm;
+  #prometheus
+  #influxdb
   #maddy = defineVm;
   #anonaddy = defineVm;
   #automatic1111 = defineVm;
   #invokeai = defineVm;
-  };
microvm.vms.test.config = {
lib,
config,
parentNodeName,
...
}: {
age.rekey.hostPubkey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBXXjI6uB26xOF0DPy/QyLladoGIKfAtofyqPgIkCH/g";
extra.wireguard.proxy-sentinel.client.via = "sentinel";
networking.nftables.firewall = {
zones = lib.mkForce {
proxy-sentinel.interfaces = ["proxy-sentinel"];
sentinel = {
parent = "proxy-sentinel";
ipv4Addresses = [nodes.sentinel.config.extra.wireguard.proxy-sentinel.ipv4];
ipv6Addresses = [nodes.sentinel.config.extra.wireguard.proxy-sentinel.ipv6];
};
};
rules = lib.mkForce {
sentinel-to-local = {
from = ["sentinel"];
to = ["local"];
allowedTCPPorts = [3001];
};
};
};
age.secrets.grafana-secret-key = {
rekeyFile = ./secrets/grafana-secret-key.age;
mode = "440";
group = "grafana";
};
age.secrets.loki-basic-auth-password = {
rekeyFile = ./secrets/loki-basic-auth-password.age;
generator = "alnum";
mode = "440";
group = "grafana";
};
services.grafana = {
enable = true;
settings = {
analytics.reporting_enabled = false;
users.allow_sign_up = false;
server = {
domain = grafanaDomain;
root_url = "https://${config.services.grafana.settings.server.domain}";
enforce_domain = true;
enable_gzip = true;
http_addr = config.extra.wireguard.proxy-sentinel.ipv4;
http_port = 3001;
};
security = {
disable_initial_admin_creation = true;
secret_key = "$__file{${config.age.secrets.grafana-secret-key.path}}";
cookie_secure = true;
disable_gravatar = true;
hide_version = true;
};
auth.disable_login_form = true;
"auth.generic_oauth" = {
enabled = true;
name = "Kanidm";
icon = "signin";
allow_sign_up = true;
auto_login = true;
client_id = "grafana";
#client_secret = "$__file{${config.age.secrets.grafana-oauth-client-secret.path}}";
client_secret = "r6Yk5PPSXFfYDPpK6TRCzXK8y1rTrfcb8F7wvNC5rZpyHTMF"; # TODO temporary test not a real secret
scopes = "openid email profile";
login_attribute_path = "prefered_username";
auth_url = "https://${authDomain}/ui/oauth2";
token_url = "https://${authDomain}/oauth2/token";
api_url = "https://${authDomain}/oauth2/openid/grafana/userinfo";
use_pkce = true;
# Allow mapping oauth2 roles to server admin
allow_assign_grafana_admin = true;
role_attribute_path = "contains(scopes[*], 'server_admin') && 'GrafanaAdmin' || contains(scopes[*], 'admin') && 'Admin' || contains(scopes[*], 'editor') && 'Editor' || 'Viewer'";
};
};
provision = {
enable = true;
datasources.settings.datasources = [
#{
# name = "Prometheus";
# type = "prometheus";
# url = "http://127.0.0.1:9090";
# orgId = 1;
#}
{
name = "Loki";
type = "loki";
access = "proxy";
url = "https://${lokiDomain}";
orgId = 1;
basicAuth = true;
basicAuthUser = nodeName;
secureJsonData.basicAuthPassword = "$__file{${config.age.secrets.loki-basic-auth-password.path}}";
}
];
};
};
};
microvm.vms.nginx.config = {
lib,
config,
...
}: {
age.rekey.hostPubkey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN2TxWynLb8V9SP45kFqsoCWhe/dG8N1xWNuJG5VQndq";
extra.wireguard.proxy-sentinel.client.via = "sentinel";
networking.nftables.firewall = {
zones = lib.mkForce {
proxy-sentinel.interfaces = ["proxy-sentinel"];
sentinel = {
parent = "proxy-sentinel";
ipv4Addresses = [nodes.sentinel.config.extra.wireguard.proxy-sentinel.ipv4];
ipv6Addresses = [nodes.sentinel.config.extra.wireguard.proxy-sentinel.ipv6];
};
};
rules = lib.mkForce {
sentinel-to-local = {
from = ["sentinel"];
to = ["local"];
allowedTCPPorts = [8300];
};
};
};
age.secrets."kanidm-self-signed.crt" = {
rekeyFile = ./secrets/kanidm-self-signed.crt.age;
mode = "440";
group = "kanidm";
};
age.secrets."kanidm-self-signed.key" = {
rekeyFile = ./secrets/kanidm-self-signed.key.age;
mode = "440";
group = "kanidm";
};
services.kanidm = {
enableServer = true;
# enablePAM = true;
serverSettings = {
domain = authDomain;
origin = "https://${config.services.kanidm.serverSettings.domain}";
tls_chain = config.age.secrets."kanidm-self-signed.crt".path;
tls_key = config.age.secrets."kanidm-self-signed.key".path;
bindaddress = "${config.extra.wireguard.proxy-sentinel.ipv4}:8300";
trust_x_forward_for = true;
};
};
environment.systemPackages = [pkgs.kanidm];
services.kanidm = {
enableClient = true;
clientSettings = {
uri = config.services.kanidm.serverSettings.origin;
verify_ca = true;
verify_hostnames = true;
};
};
};
microvm.vms.loki.config = {
lib,
config,
parentNodeName,
utils,
...
}: {
age.rekey.hostPubkey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICDDvvF3+KwfoZrPAUAt2HS7y5FM9S5Mr1iRkBUqoXno";
extra.wireguard.proxy-sentinel.client.via = "sentinel";
networking.nftables.firewall = {
zones = lib.mkForce {
proxy-sentinel.interfaces = ["proxy-sentinel"];
sentinel = {
parent = "proxy-sentinel";
ipv4Addresses = [nodes.sentinel.config.extra.wireguard.proxy-sentinel.ipv4];
ipv6Addresses = [nodes.sentinel.config.extra.wireguard.proxy-sentinel.ipv6];
};
};
rules = lib.mkForce {
sentinel-to-local = {
from = ["sentinel"];
to = ["local"];
allowedTCPPorts = [3100];
};
};
};
services.loki = {
enable = true;
configuration = {
analytics.reporting_enabled = false;
auth_enabled = false;
server = {
http_listen_address = config.extra.wireguard.proxy-sentinel.ipv4;
http_listen_port = 3100;
log_level = "warn";
};
ingester = {
lifecycler = {
interface_names = ["proxy-sentinel"];
ring = {
kvstore.store = "inmemory";
replication_factor = 1;
};
final_sleep = "0s";
};
chunk_idle_period = "5m";
chunk_retain_period = "30s";
};
schema_config.configs = [
{
from = "2023-06-01";
store = "tsdb";
object_store = "filesystem";
schema = "v12";
index = {
prefix = "index_";
period = "24h";
};
}
];
storage_config = {
tsdb_shipper = {
active_index_directory = "${lokiDir}/tsdb-index";
cache_location = "${lokiDir}/tsdb-cache";
cache_ttl = "24h";
shared_store = "filesystem";
};
filesystem.directory = "${lokiDir}/chunks";
};
# Do not accept new logs that are ingressed when they are actually already old.
limits_config = {
reject_old_samples = true;
reject_old_samples_max_age = "168h";
};
# Do not delete old logs automatically
table_manager = {
retention_deletes_enabled = false;
retention_period = "0s";
};
compactor = {
working_directory = lokiDir;
shared_store = "filesystem";
compactor_ring.kvstore.store = "inmemory";
};
};
};
# TODO this for other vms and services too?
systemd.services.loki.after = ["sys-subsystem-net-devices-${utils.escapeSystemdPath "proxy-sentinel"}.device"];
};
}

hosts/ward/grafana.nix (new file, 127 lines)

@@ -0,0 +1,127 @@
{
extra.microvms.vms.grafana = {
system = "x86_64-linux";
autostart = true;
zfs = {
enable = true;
pool = "rpool";
};
};
microvm.vms.grafana.config = {
config,
lib,
nodeName,
nodes,
utils,
...
}: {
age.rekey.hostPubkey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBXXjI6uB26xOF0DPy/QyLladoGIKfAtofyqPgIkCH/g";
extra.wireguard.proxy-sentinel.client.via = "sentinel";
networking.nftables.firewall = {
zones = lib.mkForce {
proxy-sentinel.interfaces = ["proxy-sentinel"];
sentinel = {
parent = "proxy-sentinel";
ipv4Addresses = [nodes.sentinel.config.extra.wireguard.proxy-sentinel.ipv4];
ipv6Addresses = [nodes.sentinel.config.extra.wireguard.proxy-sentinel.ipv6];
};
};
rules = lib.mkForce {
sentinel-to-local = {
from = ["sentinel"];
to = ["local"];
allowedTCPPorts = [3001];
};
};
};
age.secrets.grafana-secret-key = {
rekeyFile = ./secrets/grafana-secret-key.age;
mode = "440";
group = "grafana";
};
age.secrets.loki-basic-auth-password = {
rekeyFile = ./secrets/loki-basic-auth-password.age;
generator = "alnum";
mode = "440";
group = "grafana";
};
services.grafana = {
enable = true;
settings = {
analytics.reporting_enabled = false;
users.allow_sign_up = false;
server = {
domain = "grafana.${nodes.sentinel.config.repo.secrets.local.personalDomain}";
root_url = "https://${config.services.grafana.settings.server.domain}";
enforce_domain = true;
enable_gzip = true;
http_addr = config.extra.wireguard.proxy-sentinel.ipv4;
http_port = 3001;
};
security = {
disable_initial_admin_creation = true;
secret_key = "$__file{${config.age.secrets.grafana-secret-key.path}}";
cookie_secure = true;
disable_gravatar = true;
hide_version = true;
};
auth.disable_login_form = true;
"auth.generic_oauth" = let
authDomain = nodes.ward-kanidm.config.services.kanidm.serverSettings.domain;
in {
enabled = true;
name = "Kanidm";
icon = "signin";
allow_sign_up = true;
auto_login = true;
client_id = "grafana";
#client_secret = "$__file{${config.age.secrets.grafana-oauth-client-secret.path}}";
client_secret = "r6Yk5PPSXFfYDPpK6TRCzXK8y1rTrfcb8F7wvNC5rZpyHTMF"; # TODO temporary test not a real secret
scopes = "openid email profile";
login_attribute_path = "prefered_username";
auth_url = "https://${authDomain}/ui/oauth2";
token_url = "https://${authDomain}/oauth2/token";
api_url = "https://${authDomain}/oauth2/openid/grafana/userinfo";
use_pkce = true;
# Allow mapping oauth2 roles to server admin
allow_assign_grafana_admin = true;
role_attribute_path = "contains(scopes[*], 'server_admin') && 'GrafanaAdmin' || contains(scopes[*], 'admin') && 'Admin' || contains(scopes[*], 'editor') && 'Editor' || 'Viewer'";
};
};
provision = {
enable = true;
datasources.settings.datasources = [
#{
# name = "Prometheus";
# type = "prometheus";
# url = "http://127.0.0.1:9090";
# orgId = 1;
#}
{
name = "Loki";
type = "loki";
access = "proxy";
url = "https://loki.${nodes.sentinel.config.repo.secrets.local.personalDomain}";
orgId = 1;
basicAuth = true;
basicAuthUser = nodeName;
secureJsonData.basicAuthPassword = "$__file{${config.age.secrets.loki-basic-auth-password.path}}";
}
];
};
};
systemd.services.grafana.after = ["sys-subsystem-net-devices-${utils.escapeSystemdPath "proxy-sentinel"}.device"];
};
}

hosts/ward/kanidm.nix (new file, 81 lines)

@@ -0,0 +1,81 @@
{
extra.microvms.vms.kanidm = {
system = "x86_64-linux";
autostart = true;
zfs = {
enable = true;
pool = "rpool";
};
};
microvm.vms.kanidm.config = {
config,
lib,
nodes,
pkgs,
utils,
...
}: {
age.rekey.hostPubkey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN2TxWynLb8V9SP45kFqsoCWhe/dG8N1xWNuJG5VQndq";
extra.wireguard.proxy-sentinel.client.via = "sentinel";
# TODO this as includable module?
networking.nftables.firewall = {
zones = lib.mkForce {
proxy-sentinel.interfaces = ["proxy-sentinel"];
sentinel = {
parent = "proxy-sentinel";
ipv4Addresses = [nodes.sentinel.config.extra.wireguard.proxy-sentinel.ipv4];
ipv6Addresses = [nodes.sentinel.config.extra.wireguard.proxy-sentinel.ipv6];
};
};
rules = lib.mkForce {
sentinel-to-local = {
from = ["sentinel"];
to = ["local"];
allowedTCPPorts = [8300];
};
};
};
age.secrets."kanidm-self-signed.crt" = {
rekeyFile = ./secrets/kanidm-self-signed.crt.age;
mode = "440";
group = "kanidm";
};
age.secrets."kanidm-self-signed.key" = {
rekeyFile = ./secrets/kanidm-self-signed.key.age;
mode = "440";
group = "kanidm";
};
services.kanidm = {
enableServer = true;
# enablePAM = true;
serverSettings = {
domain = "auth.${nodes.sentinel.config.repo.secrets.local.personalDomain}";
origin = "https://${config.services.kanidm.serverSettings.domain}";
tls_chain = config.age.secrets."kanidm-self-signed.crt".path;
tls_key = config.age.secrets."kanidm-self-signed.key".path;
bindaddress = "${config.extra.wireguard.proxy-sentinel.ipv4}:8300";
trust_x_forward_for = true;
};
};
environment.systemPackages = [pkgs.kanidm];
services.kanidm = {
enableClient = true;
clientSettings = {
uri = config.services.kanidm.serverSettings.origin;
verify_ca = true;
verify_hostnames = true;
};
};
systemd.services.kanidm.after = ["sys-subsystem-net-devices-${utils.escapeSystemdPath "proxy-sentinel"}.device"];
};
}

hosts/ward/loki.nix (new file, 113 lines)

@@ -0,0 +1,113 @@
{
extra.microvms.vms.loki = {
system = "x86_64-linux";
autostart = true;
zfs = {
enable = true;
pool = "rpool";
};
};
microvm.vms.loki.config = {
config,
lib,
nodes,
utils,
...
}: {
age.rekey.hostPubkey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICDDvvF3+KwfoZrPAUAt2HS7y5FM9S5Mr1iRkBUqoXno";
extra.wireguard.proxy-sentinel.client.via = "sentinel";
networking.nftables.firewall = {
zones = lib.mkForce {
proxy-sentinel.interfaces = ["proxy-sentinel"];
sentinel = {
parent = "proxy-sentinel";
ipv4Addresses = [nodes.sentinel.config.extra.wireguard.proxy-sentinel.ipv4];
ipv6Addresses = [nodes.sentinel.config.extra.wireguard.proxy-sentinel.ipv6];
};
};
rules = lib.mkForce {
sentinel-to-local = {
from = ["sentinel"];
to = ["local"];
allowedTCPPorts = [3100];
};
};
};
services.loki = let
lokiDir = "/var/lib/loki";
in {
enable = true;
configuration = {
analytics.reporting_enabled = false;
auth_enabled = false;
server = {
http_listen_address = config.extra.wireguard.proxy-sentinel.ipv4;
http_listen_port = 3100;
log_level = "warn";
};
ingester = {
lifecycler = {
interface_names = ["proxy-sentinel"];
ring = {
kvstore.store = "inmemory";
replication_factor = 1;
};
final_sleep = "0s";
};
chunk_idle_period = "5m";
chunk_retain_period = "30s";
};
schema_config.configs = [
{
from = "2023-06-01";
store = "tsdb";
object_store = "filesystem";
schema = "v12";
index = {
prefix = "index_";
period = "24h";
};
}
];
storage_config = {
tsdb_shipper = {
active_index_directory = "${lokiDir}/tsdb-index";
cache_location = "${lokiDir}/tsdb-cache";
cache_ttl = "24h";
shared_store = "filesystem";
};
filesystem.directory = "${lokiDir}/chunks";
};
# Do not accept new logs that are ingressed when they are actually already old.
limits_config = {
reject_old_samples = true;
reject_old_samples_max_age = "168h";
};
# Do not delete old logs automatically
table_manager = {
retention_deletes_enabled = false;
retention_period = "0s";
};
compactor = {
working_directory = lokiDir;
shared_store = "filesystem";
compactor_ring.kvstore.store = "inmemory";
};
};
};
systemd.services.loki.after = ["sys-subsystem-net-devices-${utils.escapeSystemdPath "proxy-sentinel"}.device"];
};
}