chore: format everything

oddlama 2024-11-26 13:34:55 +01:00
parent deca311c68
commit 7ccd7856ee
No known key found for this signature in database
GPG key ID: 14EFE510775FE39A
162 changed files with 4750 additions and 3718 deletions
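
Every hunk below applies the same mechanical restyling: lists are broken onto one element per line with padded brackets, empty attribute sets gain an inner space, and let … in / lambda headers move onto their own lines. A minimal before/after sketch of the pattern — assuming the formatter is nixfmt (RFC 166 style), which the new layout matches; the attribute names here are illustrative, not taken from the diff:

# before: compact bracket style
{
  example.modules = ["a" "b"];
  example.empty = {};
}

# after: one element per line, padded brackets, spaced empty sets
{
  example.modules = [
    "a"
    "b"
  ];
  example.empty = { };
}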

View file

@@ -6,7 +6,8 @@
nodes,
minimal,
...
}: {
}:
{
imports = [
inputs.nixos-hardware.nixosModules.common-cpu-intel
inputs.nixos-hardware.nixosModules.common-pc-ssd
@@ -24,7 +25,15 @@
nixpkgs.hostPlatform = "x86_64-linux";
boot.mode = "efi";
boot.initrd.availableKernelModules = ["xhci_pci" "ahci" "nvme" "usbhid" "usb_storage" "e1000e" "alx"];
boot.initrd.availableKernelModules = [
"xhci_pci"
"ahci"
"nvme"
"usbhid"
"usb_storage"
"e1000e"
"alx"
];
systemd.units."dev-tpmrm0.device".enable = false; # https://github.com/systemd/systemd/issues/33412
meta.promtail = {
@@ -33,7 +42,9 @@
};
# Connect safely via wireguard to skip authentication
networking.hosts.${nodes.sentinel.config.wireguard.proxy-sentinel.ipv4} = [globals.services.influxdb.domain];
networking.hosts.${nodes.sentinel.config.wireguard.proxy-sentinel.ipv4} = [
globals.services.influxdb.domain
];
meta.telegraf = {
enable = true;
influxdb2 = {
@@ -44,52 +55,54 @@
};
};
guests = let
  mkGuest = guestName: {
    enableStorageDataset ? false,
    enableBunkerDataset ? false,
    enablePaperlessDataset ? false,
    ...
  }: {
    autostart = true;
    zfs."/state" = {
      # TODO make one option out of that? and split into two readonly options automatically?
      pool = "rpool";
      dataset = "local/guests/${guestName}";
    };
    zfs."/persist" = {
      pool = "rpool";
      dataset = "safe/guests/${guestName}";
    };
    zfs."/storage" = lib.mkIf enableStorageDataset {
      pool = "storage";
      dataset = "safe/guests/${guestName}";
    };
    zfs."/bunker" = lib.mkIf enableBunkerDataset {
      pool = "storage";
      dataset = "bunker/guests/${guestName}";
    };
    zfs."/paperless" = lib.mkIf enablePaperlessDataset {
      pool = "storage";
      dataset = "bunker/paperless";
    };
    modules = [
      ../../config
      ./guests/common.nix
      ./guests/${guestName}.nix
      {
        node.secretsDir = ./secrets/${guestName};
        networking.nftables.firewall = {
          zones.untrusted.interfaces = [config.guests.${guestName}.networking.mainLinkName];
        };
      }
    ];
  };
guests =
  let
    mkGuest =
      guestName:
      {
        enableStorageDataset ? false,
        enableBunkerDataset ? false,
        enablePaperlessDataset ? false,
        ...
      }:
      {
        autostart = true;
        zfs."/state" = {
          # TODO make one option out of that? and split into two readonly options automatically?
          pool = "rpool";
          dataset = "local/guests/${guestName}";
        };
        zfs."/persist" = {
          pool = "rpool";
          dataset = "safe/guests/${guestName}";
        };
        zfs."/storage" = lib.mkIf enableStorageDataset {
          pool = "storage";
          dataset = "safe/guests/${guestName}";
        };
        zfs."/bunker" = lib.mkIf enableBunkerDataset {
          pool = "storage";
          dataset = "bunker/guests/${guestName}";
        };
        zfs."/paperless" = lib.mkIf enablePaperlessDataset {
          pool = "storage";
          dataset = "bunker/paperless";
        };
        modules = [
          ../../config
          ./guests/common.nix
          ./guests/${guestName}.nix
          {
            node.secretsDir = ./secrets/${guestName};
            networking.nftables.firewall = {
              zones.untrusted.interfaces = [ config.guests.${guestName}.networking.mainLinkName ];
            };
          }
        ];
      };
mkMicrovm = guestName: opts: {
${guestName} =
mkGuest guestName opts
// {
mkMicrovm = guestName: opts: {
${guestName} = mkGuest guestName opts // {
backend = "microvm";
microvm = {
system = "x86_64-linux";
@@ -102,13 +115,11 @@
inherit inputs minimal;
};
};
};
};
# deadnix: skip
mkContainer = guestName: opts: {
${guestName} =
mkGuest guestName opts
// {
# deadnix: skip
mkContainer = guestName: opts: {
${guestName} = mkGuest guestName opts // {
backend = "container";
container.macvlan = "lan";
extraSpecialArgs = {
@@ -117,27 +128,27 @@
inherit inputs minimal;
};
};
};
in
};
in
lib.mkIf (!minimal) (
{}
// mkMicrovm "actual" {}
{ }
// mkMicrovm "actual" { }
// mkMicrovm "samba" {
enableStorageDataset = true;
enableBunkerDataset = true;
enablePaperlessDataset = true;
}
// mkMicrovm "grafana" {}
// mkMicrovm "influxdb" {}
// mkMicrovm "loki" {}
// mkMicrovm "grafana" { }
// mkMicrovm "influxdb" { }
// mkMicrovm "loki" { }
// mkMicrovm "paperless" {
enablePaperlessDataset = true;
}
// mkMicrovm "immich" {
enableStorageDataset = true;
}
// mkMicrovm "ai" {}
// mkMicrovm "minecraft" {}
// mkMicrovm "ai" { }
// mkMicrovm "minecraft" { }
#// mkMicrovm "firefly" {}
#// mkMicrovm "fasten-health" {}
);

View file

@@ -2,9 +2,11 @@
config,
lib,
...
}: let
}:
let
inherit (config.repo.secrets.local) disks;
in {
in
{
disko.devices = {
disk =
{
@@ -33,11 +35,9 @@ in {
zpool = {
rpool = lib.disko.zfs.mkZpool {
mode = "mirror";
datasets =
lib.disko.zfs.impermanenceZfsDatasets
// {
"safe/guests" = lib.disko.zfs.unmountable;
};
datasets = lib.disko.zfs.impermanenceZfsDatasets // {
"safe/guests" = lib.disko.zfs.unmountable;
};
};
storage = lib.disko.zfs.mkZpool {
mode = "raidz";
@@ -48,7 +48,7 @@
};
};
boot.initrd.systemd.services."zfs-import-storage".after = ["cryptsetup.target"];
boot.initrd.systemd.services."zfs-import-storage".after = [ "cryptsetup.target" ];
services.zrepl = {
enable = true;

View file

@@ -3,12 +3,14 @@
globals,
nodes,
...
}: let
}:
let
actualDomain = "finance.${globals.domains.me}";
in {
in
{
wireguard.proxy-sentinel = {
client.via = "sentinel";
firewallRuleForNode.sentinel.allowedTCPPorts = [config.services.actual.settings.port];
firewallRuleForNode.sentinel.allowedTCPPorts = [ config.services.actual.settings.port ];
};
environment.persistence."/persist".directories = [
@@ -22,7 +24,7 @@ in {
services.actual = {
enable = true;
settings.trustedProxies = [nodes.sentinel.config.wireguard.proxy-sentinel.ipv4];
settings.trustedProxies = [ nodes.sentinel.config.wireguard.proxy-sentinel.ipv4 ];
};
globals.services.actual.domain = actualDomain;
@@ -35,7 +37,8 @@ in {
nodes.sentinel = {
services.nginx = {
upstreams.actual = {
servers."${config.wireguard.proxy-sentinel.ipv4}:${toString config.services.actual.settings.port}" = {};
servers."${config.wireguard.proxy-sentinel.ipv4}:${toString config.services.actual.settings.port}" =
{ };
extraConfig = ''
zone actual 64k;
keepalive 2;

View file

@@ -2,18 +2,20 @@
config,
globals,
...
}: let
}:
let
openWebuiDomain = "chat.${globals.domains.me}";
in {
in
{
microvm.mem = 1024 * 16;
microvm.vcpu = 20;
wireguard.proxy-sentinel = {
client.via = "sentinel";
firewallRuleForNode.sentinel.allowedTCPPorts = [config.services.open-webui.port];
firewallRuleForNode.sentinel.allowedTCPPorts = [ config.services.open-webui.port ];
};
networking.firewall.allowedTCPPorts = [config.services.ollama.port];
networking.firewall.allowedTCPPorts = [ config.services.ollama.port ];
environment.persistence."/state".directories = [
{
@@ -64,7 +66,7 @@ in {
nodes.sentinel = {
services.nginx = {
upstreams.open-webui = {
servers."${config.wireguard.proxy-sentinel.ipv4}:${toString config.services.open-webui.port}" = {};
servers."${config.wireguard.proxy-sentinel.ipv4}:${toString config.services.open-webui.port}" = { };
extraConfig = ''
zone open-webui 64k;
keepalive 2;
@@ -79,7 +81,7 @@ in {
useACMEWildcardHost = true;
oauth2 = {
enable = true;
allowedGroups = ["access_openwebui"];
allowedGroups = [ "access_openwebui" ];
X-Email = "\${upstream_http_x_auth_request_preferred_username}@${globals.domains.personal}";
};
extraConfig = ''

View file

@@ -4,10 +4,12 @@
lib,
nodes,
...
}: let
}:
let
sentinelCfg = nodes.sentinel.config;
wardWebProxyCfg = nodes.ward-web-proxy.config;
in {
in
{
meta.promtail = {
enable = true;
proxy = "sentinel";
@@ -15,10 +17,11 @@ in {
# Connect safely via wireguard to skip http authentication
networking.hosts.${
if config.wireguard ? proxy-home
then wardWebProxyCfg.wireguard.proxy-home.ipv4
else sentinelCfg.wireguard.proxy-sentinel.ipv4
} = [globals.services.influxdb.domain];
if config.wireguard ? proxy-home then
wardWebProxyCfg.wireguard.proxy-home.ipv4
else
sentinelCfg.wireguard.proxy-sentinel.ipv4
} = [ globals.services.influxdb.domain ];
meta.telegraf = lib.mkIf (!config.boot.isContainer) {
enable = true;

View file

@@ -4,18 +4,24 @@
nodes,
pkgs,
...
}: let
}:
let
wardWebProxyCfg = nodes.ward-web-proxy.config;
grafanaDomain = "grafana.${globals.domains.me}";
in {
in
{
wireguard.proxy-sentinel = {
client.via = "sentinel";
firewallRuleForNode.sentinel.allowedTCPPorts = [config.services.grafana.settings.server.http_port];
firewallRuleForNode.sentinel.allowedTCPPorts = [
config.services.grafana.settings.server.http_port
];
};
wireguard.proxy-home = {
client.via = "ward";
firewallRuleForNode.ward-web-proxy.allowedTCPPorts = [config.services.grafana.settings.server.http_port];
firewallRuleForNode.ward-web-proxy.allowedTCPPorts = [
config.services.grafana.settings.server.http_port
];
};
age.secrets.grafana-secret-key = {
@@ -32,14 +38,14 @@ in {
age.secrets.grafana-influxdb-token-machines = {
generator.script = "alnum";
generator.tags = ["influxdb"];
generator.tags = [ "influxdb" ];
mode = "440";
group = "grafana";
};
age.secrets.grafana-influxdb-token-home = {
generator.script = "alnum";
generator.tags = ["influxdb"];
generator.tags = [ "influxdb" ];
mode = "440";
group = "grafana";
};
@@ -60,9 +66,10 @@ in {
};
services.influxdb2.provision.organizations.machines.auths."grafana machines:telegraf (${config.node.name})" = {
readBuckets = ["telegraf"];
writeBuckets = ["telegraf"];
tokenFile = nodes.sire-influxdb.config.age.secrets."grafana-influxdb-token-machines-${config.node.name}".path;
readBuckets = [ "telegraf" ];
writeBuckets = [ "telegraf" ];
tokenFile =
nodes.sire-influxdb.config.age.secrets."grafana-influxdb-token-machines-${config.node.name}".path;
};
age.secrets."grafana-influxdb-token-home-${config.node.name}" = {
@@ -72,9 +79,10 @@ in {
};
services.influxdb2.provision.organizations.home.auths."grafana home:home_assistant (${config.node.name})" = {
readBuckets = ["home_assistant"];
writeBuckets = ["home_assistant"];
tokenFile = nodes.sire-influxdb.config.age.secrets."grafana-influxdb-token-home-${config.node.name}".path;
readBuckets = [ "home_assistant" ];
writeBuckets = [ "home_assistant" ];
tokenFile =
nodes.sire-influxdb.config.age.secrets."grafana-influxdb-token-home-${config.node.name}".path;
};
};
@@ -92,7 +100,8 @@ in {
services.nginx = {
upstreams.grafana = {
servers."${config.wireguard.proxy-sentinel.ipv4}:${toString config.services.grafana.settings.server.http_port}" = {};
servers."${config.wireguard.proxy-sentinel.ipv4}:${toString config.services.grafana.settings.server.http_port}" =
{ };
extraConfig = ''
zone grafana 64k;
keepalive 2;
@@ -116,7 +125,8 @@ in {
nodes.ward-web-proxy = {
services.nginx = {
upstreams.grafana = {
servers."${config.wireguard.proxy-home.ipv4}:${toString config.services.grafana.settings.server.http_port}" = {};
servers."${config.wireguard.proxy-home.ipv4}:${toString config.services.grafana.settings.server.http_port}" =
{ };
extraConfig = ''
zone grafana 64k;
keepalive 2;

View file

@@ -4,7 +4,8 @@
nodes,
pkgs,
...
}: let
}:
let
sentinelCfg = nodes.sentinel.config;
wardWebProxyCfg = nodes.ward-web-proxy.config;
immichDomain = "immich.${globals.domains.me}";
@@ -127,12 +128,13 @@
serviceConfig = {
serviceConfig.Restart = "always";
after = ["podman-network-immich-default.service"];
requires = ["podman-network-immich-default.service"];
partOf = ["podman-compose-immich-root.target"];
wantedBy = ["podman-compose-immich-root.target"];
after = [ "podman-network-immich-default.service" ];
requires = [ "podman-network-immich-default.service" ];
partOf = [ "podman-compose-immich-root.target" ];
wantedBy = [ "podman-compose-immich-root.target" ];
};
in {
in
{
microvm.mem = 1024 * 12;
microvm.vcpu = 16;
@@ -155,7 +157,7 @@ in {
system.activationScripts.agenixRooterDerivedSecrets = {
# Run after agenix has generated secrets
deps = ["agenix"];
deps = [ "agenix" ];
text = ''
immichClientSecret=$(< ${config.age.secrets.immich-oauth2-client-secret.path})
${pkgs.jq}/bin/jq --arg immichClientSecret "$immichClientSecret" '.oauth.clientSecret = $immichClientSecret' ${configFile} > ${processedConfigFile}
@@ -165,14 +167,14 @@ in {
wireguard.proxy-sentinel = {
client.via = "sentinel";
firewallRuleForNode.sentinel.allowedTCPPorts = [2283];
firewallRuleForNode.sentinel.allowedTCPPorts = [ 2283 ];
};
wireguard.proxy-home = {
client.via = "ward";
firewallRuleForNode.ward-web-proxy.allowedTCPPorts = [2283];
firewallRuleForNode.ward-web-proxy.allowedTCPPorts = [ 2283 ];
};
networking.nftables.chains.forward.into-immich-container = {
after = ["conntrack"];
after = [ "conntrack" ];
rules = [
"iifname proxy-sentinel ip saddr ${sentinelCfg.wireguard.proxy-sentinel.ipv4} tcp dport 2283 accept"
"iifname proxy-home ip saddr ${wardWebProxyCfg.wireguard.proxy-home.ipv4} tcp dport 2283 accept"
@@ -190,7 +192,7 @@ in {
nodes.sentinel = {
services.nginx = {
upstreams.immich = {
servers."${config.wireguard.proxy-sentinel.ipv4}:2283" = {};
servers."${config.wireguard.proxy-sentinel.ipv4}:2283" = { };
extraConfig = ''
zone immich 64k;
keepalive 2;
@@ -222,7 +224,7 @@ in {
nodes.ward-web-proxy = {
services.nginx = {
upstreams.immich = {
servers."${config.wireguard.proxy-home.ipv4}:2283" = {};
servers."${config.wireguard.proxy-home.ipv4}:2283" = { };
extraConfig = ''
zone immich 64k;
keepalive 2;
@@ -238,8 +240,7 @@ in {
locations."/" = {
proxyPass = "http://immich";
proxyWebsockets = true;
extraConfig = ''
'';
extraConfig = '''';
};
extraConfig = ''
client_max_body_size 50G;
@@ -348,18 +349,16 @@ in {
"--ip=${ipImmichServer}"
];
};
systemd.services."podman-immich_server" =
serviceConfig
// {
unitConfig.UpheldBy = [
"podman-immich_postgres.service"
"podman-immich_redis.service"
];
};
systemd.services."podman-immich_server" = serviceConfig // {
unitConfig.UpheldBy = [
"podman-immich_postgres.service"
"podman-immich_redis.service"
];
};
# Networks
systemd.services."podman-network-immich-default" = {
path = [pkgs.podman];
path = [ pkgs.podman ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
@@ -368,8 +367,8 @@ in {
script = ''
podman network inspect immich-default || podman network create immich-default --opt isolate=true --subnet=10.89.0.0/24 --disable-dns
'';
partOf = ["podman-compose-immich-root.target"];
wantedBy = ["podman-compose-immich-root.target"];
partOf = [ "podman-compose-immich-root.target" ];
wantedBy = [ "podman-compose-immich-root.target" ];
};
# Root service
@@ -379,6 +378,6 @@ in {
unitConfig = {
Description = "Root target generated by compose2nix.";
};
wantedBy = ["multi-user.target"];
wantedBy = [ "multi-user.target" ];
};
}

View file

@@ -5,20 +5,22 @@
nodes,
pkgs,
...
}: let
}:
let
sentinelCfg = nodes.sentinel.config;
wardCfg = nodes.ward.config;
influxdbDomain = "influxdb.${globals.domains.me}";
influxdbPort = 8086;
in {
in
{
wireguard.proxy-sentinel = {
client.via = "sentinel";
firewallRuleForNode.sentinel.allowedTCPPorts = [influxdbPort];
firewallRuleForNode.sentinel.allowedTCPPorts = [ influxdbPort ];
};
wireguard.proxy-home = {
client.via = "ward";
firewallRuleForNode.ward-web-proxy.allowedTCPPorts = [influxdbPort];
firewallRuleForNode.ward-web-proxy.allowedTCPPorts = [ influxdbPort ];
};
age.secrets.github-access-token = {
@@ -28,7 +30,9 @@ in {
};
meta.telegraf.secrets."@GITHUB_ACCESS_TOKEN@" = config.age.secrets.github-access-token.path;
services.telegraf.extraConfig.outputs.influxdb_v2.urls = lib.mkForce ["http://localhost:${toString influxdbPort}"];
services.telegraf.extraConfig.outputs.influxdb_v2.urls = lib.mkForce [
"http://localhost:${toString influxdbPort}"
];
services.telegraf.extraConfig.inputs = {
github = {
@@ -51,7 +55,7 @@ in {
nodes.sentinel = {
services.nginx = {
upstreams.influxdb = {
servers."${config.wireguard.proxy-sentinel.ipv4}:${toString influxdbPort}" = {};
servers."${config.wireguard.proxy-sentinel.ipv4}:${toString influxdbPort}" = { };
extraConfig = ''
zone influxdb 64k;
keepalive 2;
@@ -61,35 +65,39 @@ in {
expectedBodyRegex = "InfluxDB";
};
};
virtualHosts.${influxdbDomain} = let
accessRules = ''
${lib.concatMapStrings (ip: "allow ${ip};\n") sentinelCfg.wireguard.proxy-sentinel.server.reservedAddresses}
deny all;
'';
in {
forceSSL = true;
useACMEWildcardHost = true;
locations."/" = {
proxyPass = "http://influxdb";
proxyWebsockets = true;
extraConfig = accessRules;
};
locations."/api/v2/write" = {
proxyPass = "http://influxdb/api/v2/write";
proxyWebsockets = true;
extraConfig = ''
${accessRules}
access_log off;
'';
};
};
virtualHosts.${influxdbDomain} =
let
accessRules = ''
${lib.concatMapStrings (
ip: "allow ${ip};\n"
) sentinelCfg.wireguard.proxy-sentinel.server.reservedAddresses}
deny all;
'';
in
{
forceSSL = true;
useACMEWildcardHost = true;
locations."/" = {
proxyPass = "http://influxdb";
proxyWebsockets = true;
extraConfig = accessRules;
};
locations."/api/v2/write" = {
proxyPass = "http://influxdb/api/v2/write";
proxyWebsockets = true;
extraConfig = ''
${accessRules}
access_log off;
'';
};
};
};
};
};
nodes.ward-web-proxy = {
services.nginx = {
upstreams.influxdb = {
servers."${config.wireguard.proxy-home.ipv4}:${toString influxdbPort}" = {};
servers."${config.wireguard.proxy-home.ipv4}:${toString influxdbPort}" = { };
extraConfig = ''
zone influxdb 64k;
keepalive 2;
@@ -99,28 +107,30 @@ in {
expectedBodyRegex = "InfluxDB";
};
};
virtualHosts.${influxdbDomain} = let
accessRules = ''
${lib.concatMapStrings (ip: "allow ${ip};\n") wardCfg.wireguard.proxy-home.server.reservedAddresses}
deny all;
'';
in {
forceSSL = true;
useACMEWildcardHost = true;
locations."/" = {
proxyPass = "http://influxdb";
proxyWebsockets = true;
extraConfig = accessRules;
};
locations."/api/v2/write" = {
proxyPass = "http://influxdb/api/v2/write";
proxyWebsockets = true;
extraConfig = ''
${accessRules}
access_log off;
'';
};
};
virtualHosts.${influxdbDomain} =
let
accessRules = ''
${lib.concatMapStrings (ip: "allow ${ip};\n") wardCfg.wireguard.proxy-home.server.reservedAddresses}
deny all;
'';
in
{
forceSSL = true;
useACMEWildcardHost = true;
locations."/" = {
proxyPass = "http://influxdb";
proxyWebsockets = true;
extraConfig = accessRules;
};
locations."/api/v2/write" = {
proxyPass = "http://influxdb/api/v2/write";
proxyWebsockets = true;
extraConfig = ''
${accessRules}
access_log off;
'';
};
};
};
};
};
@@ -166,12 +176,12 @@ in {
passwordFile = config.age.secrets.influxdb-admin-password.path;
tokenFile = config.age.secrets.influxdb-admin-token.path;
};
organizations.machines.buckets.telegraf = {};
organizations.home.buckets.home_assistant = {};
organizations.machines.buckets.telegraf = { };
organizations.home.buckets.home_assistant = { };
};
};
environment.systemPackages = [pkgs.influxdb2-cli];
environment.systemPackages = [ pkgs.influxdb2-cli ];
systemd.services.grafana.serviceConfig.RestartSec = "60"; # Retry every minute
}

View file

@@ -3,19 +3,25 @@
globals,
nodes,
...
}: let
}:
let
sentinelCfg = nodes.sentinel.config;
wardWebProxyCfg = nodes.ward-web-proxy.config;
lokiDomain = "loki.${globals.domains.me}";
in {
in
{
wireguard.proxy-sentinel = {
client.via = "sentinel";
firewallRuleForNode.sentinel.allowedTCPPorts = [config.services.loki.configuration.server.http_listen_port];
firewallRuleForNode.sentinel.allowedTCPPorts = [
config.services.loki.configuration.server.http_listen_port
];
};
wireguard.proxy-home = {
client.via = "ward";
firewallRuleForNode.ward-web-proxy.allowedTCPPorts = [config.services.loki.configuration.server.http_listen_port];
firewallRuleForNode.ward-web-proxy.allowedTCPPorts = [
config.services.loki.configuration.server.http_listen_port
];
};
globals.services.loki.domain = lokiDomain;
@@ -29,7 +35,8 @@ in {
services.nginx = {
upstreams.loki = {
servers."${config.wireguard.proxy-sentinel.ipv4}:${toString config.services.loki.configuration.server.http_listen_port}" = {};
servers."${config.wireguard.proxy-sentinel.ipv4}:${toString config.services.loki.configuration.server.http_listen_port}" =
{ };
extraConfig = ''
zone loki 64k;
keepalive 2;
@@ -76,7 +83,8 @@ in {
services.nginx = {
upstreams.loki = {
servers."${config.wireguard.proxy-home.ipv4}:${toString config.services.loki.configuration.server.http_listen_port}" = {};
servers."${config.wireguard.proxy-home.ipv4}:${toString config.services.loki.configuration.server.http_listen_port}" =
{ };
extraConfig = ''
zone loki 64k;
keepalive 2;
@@ -124,74 +132,76 @@ in {
];
topology.self.services.loki.info = "https://" + lokiDomain;
services.loki = let
  lokiDir = "/var/lib/loki";
in {
  enable = true;
  configuration = {
    analytics.reporting_enabled = false;
    auth_enabled = false;
    server = {
      http_listen_address = "0.0.0.0";
      http_listen_port = 3100;
      log_level = "warn";
    };
    ingester = {
      lifecycler = {
        address = "127.0.0.1";
        ring = {
          kvstore.store = "inmemory";
          replication_factor = 1;
        };
        final_sleep = "0s";
      };
      chunk_idle_period = "5m";
      chunk_retain_period = "30s";
    };
    schema_config.configs = [
      {
        from = "2023-06-01";
        store = "tsdb";
        object_store = "filesystem";
        schema = "v13";
        index = {
          prefix = "index_";
          period = "24h";
        };
      }
    ];
    storage_config = {
      tsdb_shipper = {
        active_index_directory = "${lokiDir}/tsdb-index";
        cache_location = "${lokiDir}/tsdb-cache";
        cache_ttl = "24h";
      };
      filesystem.directory = "${lokiDir}/chunks";
    };
    # Do not accept new logs that are ingressed when they are actually already old.
    limits_config = {
      reject_old_samples = true;
      reject_old_samples_max_age = "168h";
      allow_structured_metadata = false;
    };
    # Do not delete old logs automatically
    table_manager = {
      retention_deletes_enabled = false;
      retention_period = "0s";
    };
    compactor = {
      working_directory = lokiDir;
      compactor_ring.kvstore.store = "inmemory";
    };
  };
};
services.loki =
  let
    lokiDir = "/var/lib/loki";
  in
  {
    enable = true;
    configuration = {
      analytics.reporting_enabled = false;
      auth_enabled = false;
      server = {
        http_listen_address = "0.0.0.0";
        http_listen_port = 3100;
        log_level = "warn";
      };
      ingester = {
        lifecycler = {
          address = "127.0.0.1";
          ring = {
            kvstore.store = "inmemory";
            replication_factor = 1;
          };
          final_sleep = "0s";
        };
        chunk_idle_period = "5m";
        chunk_retain_period = "30s";
      };
      schema_config.configs = [
        {
          from = "2023-06-01";
          store = "tsdb";
          object_store = "filesystem";
          schema = "v13";
          index = {
            prefix = "index_";
            period = "24h";
          };
        }
      ];
      storage_config = {
        tsdb_shipper = {
          active_index_directory = "${lokiDir}/tsdb-index";
          cache_location = "${lokiDir}/tsdb-cache";
          cache_ttl = "24h";
        };
        filesystem.directory = "${lokiDir}/chunks";
      };
      # Do not accept new logs that are ingressed when they are actually already old.
      limits_config = {
        reject_old_samples = true;
        reject_old_samples_max_age = "168h";
        allow_structured_metadata = false;
      };
      # Do not delete old logs automatically
      table_manager = {
        retention_deletes_enabled = false;
        retention_period = "0s";
      };
      compactor = {
        working_directory = lokiDir;
        compactor_ring.kvstore.store = "inmemory";
      };
    };
  };
systemd.services.loki.serviceConfig.RestartSec = "60"; # Retry every minute
}

View file

@@ -5,7 +5,8 @@
lib,
pkgs,
...
}: let
}:
let
inherit (lib) getExe;
minecraftDomain = "mc.${globals.domains.me}";
@@ -13,7 +14,7 @@
minecraft-attach = pkgs.writeShellApplication {
name = "minecraft-attach";
runtimeInputs = [pkgs.tmux];
runtimeInputs = [ pkgs.tmux ];
text = ''
shopt -s nullglob
@@ -40,9 +41,7 @@
};
helper-functions =
/*
bash
*/
# bash
''
################################################################
# General helper functions
@@ -171,7 +170,7 @@
server-backup-script = pkgs.writeShellApplication {
name = "minecraft-backup";
runtimeInputs = [pkgs.rdiff-backup];
runtimeInputs = [ pkgs.rdiff-backup ];
text = ''
BACKUP_LOG_FILE="logs/backup.log"
BACKUP_TO="backups"
@@ -199,7 +198,10 @@
server-start-script = pkgs.writeShellApplication {
name = "minecraft-server-start";
runtimeInputs = [pkgs.procps pkgs.gnugrep];
runtimeInputs = [
pkgs.procps
pkgs.gnugrep
];
text = ''
cd ${dataDir}/server
@@ -252,7 +254,11 @@
server-update-script = pkgs.writeShellApplication {
name = "minecraft-server-update";
runtimeInputs = [pkgs.wget pkgs.curl pkgs.jq];
runtimeInputs = [
pkgs.wget
pkgs.curl
pkgs.jq
];
text = ''
cd ${dataDir}/server || exit 1
${helper-functions}
@@ -285,7 +291,11 @@
proxy-update-script = pkgs.writeShellApplication {
name = "minecraft-proxy-update";
runtimeInputs = [pkgs.wget pkgs.curl pkgs.jq];
runtimeInputs = [
pkgs.wget
pkgs.curl
pkgs.jq
];
text = ''
cd ${dataDir}/proxy || exit 1
${helper-functions}
@@ -313,8 +323,8 @@
User = "minecraft";
# Hardening
AmbientCapabilities = ["CAP_KILL"];
CapabilityBoundingSet = ["CAP_KILL"];
AmbientCapabilities = [ "CAP_KILL" ];
CapabilityBoundingSet = [ "CAP_KILL" ];
LockPersonality = true;
NoNewPrivileges = true;
PrivateDevices = true;
@@ -335,16 +345,21 @@
SystemCallArchitectures = "native";
UMask = "0027";
};
in {
in
{
microvm.mem = 1024 * 24;
microvm.vcpu = 16;
wireguard.proxy-sentinel = {
client.via = "sentinel";
firewallRuleForNode.sentinel.allowedTCPPorts = [80 25565 25566];
firewallRuleForNode.sentinel.allowedTCPPorts = [
80
25565
25566
];
};
users.groups.minecraft.members = ["nginx"];
users.groups.minecraft.members = [ "nginx" ];
users.users.minecraft = {
description = "Minecraft server service user";
home = dataDir;
@@ -379,7 +394,7 @@ in {
# - 25565,25566 (wan) -> 25565,25566 (proxy-sentinel)
networking.nftables.chains = {
postrouting.to-minecraft = {
after = ["hook"];
after = [ "hook" ];
rules = [
"iifname wan ip daddr ${config.wireguard.proxy-sentinel.ipv4} tcp dport 25565 masquerade random"
"iifname wan ip6 daddr ${config.wireguard.proxy-sentinel.ipv6} tcp dport 25565 masquerade random"
@@ -388,7 +403,7 @@ in {
];
};
prerouting.to-minecraft = {
after = ["hook"];
after = [ "hook" ];
rules = [
"iifname wan tcp dport 25565 dnat ip to ${config.wireguard.proxy-sentinel.ipv4}"
"iifname wan tcp dport 25565 dnat ip6 to ${config.wireguard.proxy-sentinel.ipv6}"
@@ -400,7 +415,7 @@ in {
services.nginx = {
upstreams.minecraft = {
servers."${config.wireguard.proxy-sentinel.ipv4}:80" = {};
servers."${config.wireguard.proxy-sentinel.ipv4}:80" = { };
extraConfig = ''
zone minecraft 64k;
keepalive 2;
@@ -422,8 +437,8 @@ in {
systemd.services.minecraft-server = {
description = "Minecraft Server Service";
wantedBy = ["multi-user.target"];
after = ["network.target"];
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
path = [
# for infocmp
pkgs.ncurses
@@ -431,18 +446,19 @@ in {
pkgs.libwebp
];
serviceConfig =
  commonServiceConfig
  // {
    Type = "forking";
    ExecStart = ''${getExe pkgs.tmux} -S /run/minecraft-server/tmux set -g default-shell ${getExe pkgs.bashInteractive} ";" new-session -d "${getExe pkgs.python3} ${./minecraft/server-loop.py} --block control/start.block ./start.sh :POST: ./backup.sh"'';
    ExecStop = "${getExe pkgs.tmux} -S /run/minecraft-server/tmux kill-server";
    WorkingDirectory = "${dataDir}/server";
    RuntimeDirectory = "minecraft-server";
    ReadWritePaths = ["${dataDir}/server" "${dataDir}/web"];
    ReadOnlyPaths = "${dataDir}/proxy";
  };
serviceConfig = commonServiceConfig // {
  Type = "forking";
  ExecStart = ''${getExe pkgs.tmux} -S /run/minecraft-server/tmux set -g default-shell ${getExe pkgs.bashInteractive} ";" new-session -d "${getExe pkgs.python3} ${./minecraft/server-loop.py} --block control/start.block ./start.sh :POST: ./backup.sh"'';
  ExecStop = "${getExe pkgs.tmux} -S /run/minecraft-server/tmux kill-server";
  WorkingDirectory = "${dataDir}/server";
  RuntimeDirectory = "minecraft-server";
  ReadWritePaths = [
    "${dataDir}/server"
    "${dataDir}/web"
  ];
  ReadOnlyPaths = "${dataDir}/proxy";
};
preStart = ''
ln -sfT ${getExe server-start-script} start.sh
@@ -470,21 +486,22 @@ in {
systemd.services.minecraft-proxy = {
description = "Minecraft Proxy Service";
wantedBy = ["multi-user.target"];
after = ["network.target"];
path = [pkgs.ncurses]; # for infocmp
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
path = [ pkgs.ncurses ]; # for infocmp
serviceConfig =
  commonServiceConfig
  // {
    Type = "forking";
    ExecStart = ''${getExe pkgs.tmux} -S /run/minecraft-proxy/tmux set -g default-shell ${getExe pkgs.bashInteractive} ";" new-session -d "${getExe pkgs.python3} ${./minecraft/server-loop.py} ./start.sh"'';
    ExecStop = "${getExe pkgs.tmux} -S /run/minecraft-proxy/tmux kill-server";
    WorkingDirectory = "${dataDir}/proxy";
    RuntimeDirectory = "minecraft-proxy";
    ReadWritePaths = ["${dataDir}/proxy" "${dataDir}/server/control"];
  };
serviceConfig = commonServiceConfig // {
  Type = "forking";
  ExecStart = ''${getExe pkgs.tmux} -S /run/minecraft-proxy/tmux set -g default-shell ${getExe pkgs.bashInteractive} ";" new-session -d "${getExe pkgs.python3} ${./minecraft/server-loop.py} ./start.sh"'';
  ExecStop = "${getExe pkgs.tmux} -S /run/minecraft-proxy/tmux kill-server";
  WorkingDirectory = "${dataDir}/proxy";
  RuntimeDirectory = "minecraft-proxy";
  ReadWritePaths = [
    "${dataDir}/proxy"
    "${dataDir}/server/control"
  ];
};
preStart = ''
ln -sfT ${getExe proxy-start-script} start.sh

View file

@@ -5,23 +5,25 @@
nodes,
pkgs,
...
}: let
}:
let
sentinelCfg = nodes.sentinel.config;
wardWebProxyCfg = nodes.ward-web-proxy.config;
paperlessDomain = "paperless.${globals.domains.me}";
paperlessBackupDir = "/var/cache/paperless-backup";
in {
in
{
microvm.mem = 1024 * 9;
microvm.vcpu = 8;
wireguard.proxy-sentinel = {
client.via = "sentinel";
firewallRuleForNode.sentinel.allowedTCPPorts = [config.services.paperless.port];
firewallRuleForNode.sentinel.allowedTCPPorts = [ config.services.paperless.port ];
};
wireguard.proxy-home = {
client.via = "ward";
firewallRuleForNode.ward-web-proxy.allowedTCPPorts = [config.services.paperless.port];
firewallRuleForNode.ward-web-proxy.allowedTCPPorts = [ config.services.paperless.port ];
};
globals.services.paperless.domain = paperlessDomain;
@@ -34,7 +36,7 @@ in {
nodes.sentinel = {
services.nginx = {
upstreams.paperless = {
servers."${config.wireguard.proxy-sentinel.ipv4}:${toString config.services.paperless.port}" = {};
servers."${config.wireguard.proxy-sentinel.ipv4}:${toString config.services.paperless.port}" = { };
extraConfig = ''
zone paperless 64k;
keepalive 2;
@@ -62,7 +64,7 @@ in {
nodes.ward-web-proxy = {
services.nginx = {
upstreams.paperless = {
servers."${config.wireguard.proxy-home.ipv4}:${toString config.services.paperless.port}" = {};
servers."${config.wireguard.proxy-home.ipv4}:${toString config.services.paperless.port}" = { };
extraConfig = ''
zone paperless 64k;
keepalive 2;
@@ -191,20 +193,22 @@ in {
)
'';
systemd.services.paperless-backup = let
  cfg = config.systemd.services.paperless-consumer;
in {
  description = "Paperless documents backup";
  serviceConfig = lib.recursiveUpdate cfg.serviceConfig {
    ExecStart = "${config.services.paperless.package}/bin/paperless-ngx document_exporter -na -nt -f -d ${paperlessBackupDir}";
    ReadWritePaths = cfg.serviceConfig.ReadWritePaths ++ [paperlessBackupDir];
    Restart = "no";
    Type = "oneshot";
  };
  inherit (cfg) environment;
  requiredBy = ["restic-backups-storage-box-dusk.service"];
  before = ["restic-backups-storage-box-dusk.service"];
};
systemd.services.paperless-backup =
  let
    cfg = config.systemd.services.paperless-consumer;
  in
  {
    description = "Paperless documents backup";
    serviceConfig = lib.recursiveUpdate cfg.serviceConfig {
      ExecStart = "${config.services.paperless.package}/bin/paperless-ngx document_exporter -na -nt -f -d ${paperlessBackupDir}";
      ReadWritePaths = cfg.serviceConfig.ReadWritePaths ++ [ paperlessBackupDir ];
      Restart = "no";
      Type = "oneshot";
    };
    inherit (cfg) environment;
    requiredBy = [ "restic-backups-storage-box-dusk.service" ];
    before = [ "restic-backups-storage-box-dusk.service" ];
  };
# Needed so we don't run out of tmpfs space for large backups.
# Technically this could be cleared each boot but whatever.
@@ -219,6 +223,6 @@ in {
backups.storageBoxes.dusk = {
subuser = "paperless";
paths = [paperlessBackupDir];
paths = [ paperlessBackupDir ];
};
}

View file

@@ -3,7 +3,8 @@
globals,
lib,
...
}: let
}:
let
smbUsers = config.repo.secrets.local.samba.users;
smbGroups = config.repo.secrets.local.samba.groups;
@@ -19,19 +20,22 @@
};
mkShare = id: path: cfg: {
${id} =
{
inherit path;
public = "no";
writable = "yes";
"create mask" = "0740";
"directory mask" = "0750";
"acl allow execute always" = "yes";
}
// cfg;
${id} = {
inherit path;
public = "no";
writable = "yes";
"create mask" = "0740";
"directory mask" = "0750";
"acl allow execute always" = "yes";
} // cfg;
};
mkGroupShares = group: {enableBunker ? false, ...}:
mkGroupShares =
group:
{
enableBunker ? false,
...
}:
[
(mkShare group "/shares/groups/${group}" {
"valid users" = "@${group}";
@@ -47,11 +51,13 @@
}
);
mkUserShares = user: {
enableBunker ? false,
enablePaperless ? false,
...
}:
mkUserShares =
user:
{
enableBunker ? false,
enablePaperless ? false,
...
}:
[
(mkShare user "/shares/users/${user}" {
"valid users" = user;
@@ -69,7 +75,8 @@
"force group" = "paperless";
}
);
in {
in
{
# For influxdb communication channel
wireguard.proxy-home.client.via = "ward";
@@ -81,7 +88,7 @@ in {
services.openssh = {
# You really have to hate them. Thanks Brother ADS-4300N.
settings = {
Macs = ["hmac-sha2-512"];
Macs = [ "hmac-sha2-512" ];
HostkeyAlgorithms = "+ssh-rsa";
PubkeyAcceptedAlgorithms = "+ssh-rsa";
};
@@ -115,18 +122,22 @@ in {
]
++ lib.flatten (
lib.flip lib.mapAttrsToList smbUsers (
name: {enableBunker ? false, ...}:
[(mkPersistent "/storage" "/shares/users/${name}" name)]
++ lib.optional enableBunker (
mkPersistent "/bunker" "/shares/users/${name}-bunker" name
)
name:
{
enableBunker ? false,
...
}:
[ (mkPersistent "/storage" "/shares/users/${name}" name) ]
++ lib.optional enableBunker (mkPersistent "/bunker" "/shares/users/${name}-bunker" name)
)
++ lib.flip lib.mapAttrsToList smbGroups (
name: {enableBunker ? false, ...}:
[(mkPersistent "/storage" "/shares/groups/${name}" name)]
++ lib.optional enableBunker (
mkPersistent "/bunker" "/shares/groups/${name}-bunker" name
)
name:
{
enableBunker ? false,
...
}:
[ (mkPersistent "/storage" "/shares/groups/${name}" name) ]
++ lib.optional enableBunker (mkPersistent "/bunker" "/shares/groups/${name}-bunker" name)
)
)
);
@@ -153,7 +164,8 @@ in {
# Service Switch capability found in most modern C libraries, to arbitrary
# applications via PAM and ntlm_auth and to Samba itself.
winbindd.enable = false;
settings = lib.mkMerge ([
settings = lib.mkMerge (
[
{
global = {
# Show the server host name in the printer comment box in print manager
@ -215,9 +227,9 @@ in {
}
]
++ lib.flatten (
lib.mapAttrsToList mkUserShares smbUsers
++ lib.mapAttrsToList mkGroupShares smbGroups
));
lib.mapAttrsToList mkUserShares smbUsers ++ lib.mapAttrsToList mkGroupShares smbGroups
)
);
};
systemd.tmpfiles.settings = lib.mkMerge (
@@ -255,8 +267,9 @@ in {
]
# For each paperless share, make sure the necessary sub-folders for that user are created
# at boot so we can bind-mount them into the shares.
++ lib.flatten (lib.flip lib.mapAttrsToList smbUsers (
user: userCfg:
++ lib.flatten (
lib.flip lib.mapAttrsToList smbUsers (
user: userCfg:
lib.optional (userCfg.enablePaperless or false) {
"10-smb-paperless" = {
"/shares/users/${user}-paperless".d = {
@ -293,7 +306,8 @@ in {
};
};
}
))
)
)
);
# For each paperless share, bind-mount create the necessary folders using tmpfiles.
@@ -306,43 +320,50 @@ in {
]
++ lib.flip lib.mapAttrsToList smbUsers (
user: userCfg:
lib.optionalAttrs (userCfg.enablePaperless or false) {
"/shares/users/${user}-paperless/consume" = {
fsType = "none";
options = ["bind"];
device = "/paperless/consume/${user}";
};
"/shares/users/${user}-paperless/documents" = {
fsType = "none";
options = ["bind" "ro"];
device = "/paperless/media/documents/archive/${user}";
};
"/shares/users/${user}-paperless/originals" = {
fsType = "none";
options = ["bind" "ro"];
device = "/paperless/media/documents/originals/${user}";
};
}
lib.optionalAttrs (userCfg.enablePaperless or false) {
"/shares/users/${user}-paperless/consume" = {
fsType = "none";
options = [ "bind" ];
device = "/paperless/consume/${user}";
};
"/shares/users/${user}-paperless/documents" = {
fsType = "none";
options = [
"bind"
"ro"
];
device = "/paperless/media/documents/archive/${user}";
};
"/shares/users/${user}-paperless/originals" = {
fsType = "none";
options = [
"bind"
"ro"
];
device = "/paperless/media/documents/originals/${user}";
};
}
)
);
users.users = let
mkUser = name: id: groups: {
isNormalUser = true;
uid = id;
group = name;
extraGroups = groups;
createHome = false;
home = "/var/empty";
useDefaultShell = false;
autoSubUidGidRange = false;
};
in
users.users =
let
mkUser = name: id: groups: {
isNormalUser = true;
uid = id;
group = name;
extraGroups = groups;
createHome = false;
home = "/var/empty";
useDefaultShell = false;
autoSubUidGidRange = false;
};
in
lib.mkMerge [
(
{}
{ }
// lib.mapAttrs (name: cfg: mkUser name cfg.id cfg.groups) smbUsers
// lib.mapAttrs (name: cfg: mkUser name cfg.id []) smbGroups
// lib.mapAttrs (name: cfg: mkUser name cfg.id [ ]) smbGroups
)
{
scanner.openssh.authorizedKeys.keys = [
@@ -357,14 +378,12 @@ in {
}
];
users.groups =
{
paperless.gid = config.ids.gids.paperless;
}
// lib.mapAttrs (_: cfg: {gid = cfg.id;}) (smbUsers // smbGroups);
users.groups = {
paperless.gid = config.ids.gids.paperless;
} // lib.mapAttrs (_: cfg: { gid = cfg.id; }) (smbUsers // smbGroups);
backups.storageBoxes.dusk = {
subuser = "samba";
paths = ["/bunker"];
paths = [ "/bunker" ];
};
}

View file

@@ -3,7 +3,8 @@
globals,
lib,
...
}: {
}:
{
networking.hostId = config.repo.secrets.local.networking.hostId;
globals.monitoring.ping.sire = {
@@ -15,8 +16,8 @@
boot.initrd.systemd.network = {
enable = true;
networks."10-lan" = {
address = [globals.net.home-lan.hosts.sire.cidrv4];
gateway = [globals.net.home-lan.hosts.ward.ipv4];
address = [ globals.net.home-lan.hosts.sire.cidrv4 ];
gateway = [ globals.net.home-lan.hosts.ward.ipv4 ];
matchConfig.MACAddress = config.repo.secrets.local.networking.interfaces.lan.mac;
networkConfig = {
IPv6PrivacyExtensions = "yes";
@@ -53,8 +54,8 @@
'';
};
"20-lan-self" = {
address = [globals.net.home-lan.hosts.sire.cidrv4];
gateway = [globals.net.home-lan.hosts.ward.ipv4];
address = [ globals.net.home-lan.hosts.sire.cidrv4 ];
gateway = [ globals.net.home-lan.hosts.ward.ipv4 ];
matchConfig.Name = "lan-self";
networkConfig = {
IPv6PrivacyExtensions = "yes";
@@ -71,7 +72,7 @@
};
networking.nftables.firewall = {
zones.untrusted.interfaces = ["lan-self"];
zones.untrusted.interfaces = [ "lan-self" ];
};
# Allow accessing influx