feat(sire): init host: main media storage server

This commit is contained in:
oddlama 2024-01-04 19:06:23 +01:00
parent 36cb1d31cb
commit 1a96a4b8df
No known key found for this signature in database
GPG key ID: 14EFE510775FE39A
33 changed files with 364 additions and 69 deletions

100
hosts/sire/default.nix Normal file
View file

@ -0,0 +1,100 @@
# Host configuration for "sire", the main media storage server.
# All services run as guests (defined at the bottom) on top of ZFS
# datasets provisioned per guest.
{
config,
inputs,
lib,
nodes,
minimal,
...
}: {
imports = [
inputs.nixos-hardware.nixosModules.common-cpu-intel
inputs.nixos-hardware.nixosModules.common-pc-ssd
../../modules/optional/hardware/intel.nix
../../modules/optional/hardware/physical.nix
../../modules
../../modules/optional/initrd-ssh.nix
../../modules/optional/zfs.nix
./fs.nix
./net.nix
];
boot.mode = "efi";
boot.initrd.availableKernelModules = ["xhci_pci" "ahci" "nvme" "usbhid" "usb_storage" "sd_mod" "sdhci_pci" "r8169"];
# Ship this host's logs to loki via the sentinel proxy.
meta.promtail = {
enable = true;
proxy = "sentinel";
};
# Connect safely via wireguard to skip authentication
networking.hosts.${nodes.sentinel.config.meta.wireguard.proxy-sentinel.ipv4} = [nodes.sentinel.config.networking.providedDomains.influxdb];
meta.telegraf = {
enable = true;
influxdb2 = {
domain = nodes.sentinel.config.networking.providedDomains.influxdb;
organization = "machines";
bucket = "telegraf";
# NOTE(review): this host is "sire" and influxdb runs as a guest here,
# yet the reporting node is "ward-influxdb" — looks copy-pasted from
# the ward host; confirm whether this should be the local influxdb guest.
node = "ward-influxdb";
};
};
# TODO track my github stats
# services.telegraf.extraConfig.inputs.github = {};
# Guest definitions. Each guest gets a volatile /state dataset (local/)
# and a backed-up /persist dataset (safe/), plus its own guest module
# and secrets directory.
guests = let
mkGuest = guestName: {
autostart = true;
zfs."/state" = {
pool = "rpool";
dataset = "local/guests/${guestName}";
};
zfs."/persist" = {
pool = "rpool";
dataset = "safe/guests/${guestName}";
};
modules = [
../../modules
./guests/common.nix
./guests/${guestName}.nix
{node.secretsDir = ./secrets/${guestName};}
];
};
# Wrap mkGuest into a microvm guest attached to the LAN via macvtap;
# guest MACs are derived from the host's lan interface MAC.
mkMicrovm = guestName: {
${guestName} =
mkGuest guestName
// {
backend = "microvm";
microvm = {
system = "x86_64-linux";
macvtap = "lan";
baseMac = config.repo.secrets.local.networking.interfaces.lan.mac;
};
};
};
# Container variant of the above; currently unused below, hence the
# deadnix suppression.
# deadnix: skip
mkContainer = guestName: {
${guestName} =
mkGuest guestName
// {
backend = "container";
container.macvlan = "lan";
};
};
in
# Guests are skipped entirely for minimal evaluations.
lib.mkIf (!minimal) (
{}
// mkMicrovm "samba"
// mkMicrovm "grafana"
// mkMicrovm "influxdb"
// mkMicrovm "loki"
// mkMicrovm "paperless"
#// mkMicrovm "minecraft"
#// mkMicrovm "immich"
#// mkMicrovm "firefly"
#// mkMicrovm "fasten-health"
);
}

118
hosts/sire/fs.nix Normal file
View file

@ -0,0 +1,118 @@
# Disk layout (disko) and local snapshotting (zrepl) for sire.
{
config,
lib,
...
}: {
disko.devices = {
# Two NVMe SSDs form the LUKS-encrypted "rpool"; the first one also
# carries the EFI system partition. The HDDs listed in the local
# secrets form the LUKS-encrypted "tank" pool.
disk =
{
m2-ssd-1 = {
type = "disk";
device = "/dev/disk/by-id/${config.repo.secrets.local.disk.m2-ssd-1}";
content = with lib.disko.gpt; {
type = "table";
format = "gpt";
partitions = [
(partEfi "efi" "0%" "1GiB")
(partLuksZfs "rpool" "1GiB" "100%")
];
};
};
m2-ssd-2 = {
type = "disk";
device = "/dev/disk/by-id/${config.repo.secrets.local.disk.m2-ssd-2}";
content = lib.disko.content.luksZfs "rpool";
};
}
# One disk entry per HDD, all contributing to the tank pool below.
// lib.genAttrs config.repo.secrets.local.disk.hdds-tank (disk: {
type = "disk";
device = "/dev/disk/by-id/${disk}";
content = lib.disko.content.luksZfs "tank";
});
zpool = with lib.disko.zfs; {
# SSD pool: mirrored; carries the impermanence system datasets plus
# per-guest persistent data under safe/guests.
rpool = mkZpool {
mode = "mirror";
datasets =
impermanenceZfsDatasets
// {
# Parent container for per-guest datasets; never mounted itself.
"safe/guests" = unmountable;
};
};
# HDD pool: raidz1; guest bulk data only.
tank = mkZpool {
mode = "raidz1";
datasets = {
"safe/guests" = unmountable;
};
};
};
};
# Local snapshots only (job type "snap"); no replication to another host.
services.zrepl = {
enable = true;
settings = {
global = {
logging = [
{
type = "syslog";
level = "info";
format = "human";
}
];
# TODO zrepl monitor
#monitoring = [
# {
# type = "prometheus";
# listen = ":9811";
# listen_freebind = true;
# }
#];
};
jobs = [
{
name = "local-snapshots";
type = "snap";
# Trailing "<" selects the dataset and all of its children.
filesystems = {
"rpool/local/state<" = true;
"rpool/safe<" = true;
"tank/safe<" = true;
};
snapshotting = {
type = "periodic";
prefix = "zrepl-";
timestamp_format = "iso-8601";
interval = "15m";
};
pruning.keep = [
# Keep all manual snapshots
{
type = "regex";
regex = "^zrepl-.*$";
negate = true;
}
# Keep last n snapshots
{
type = "last_n";
regex = "^zrepl-.*$";
count = 10;
}
# Prune periodically
# Grid: hourly for 72h, daily for 90d, weekly for 60 weeks,
# then one per 30 days for 24 more periods.
{
type = "grid";
regex = "^zrepl-.*$";
grid = lib.concatStringsSep " | " [
"72x1h"
"90x1d"
"60x1w"
"24x30d"
];
}
];
}
];
};
};
# Let TRIM/discard requests pass through the LUKS layer.
boot.initrd.luks.devices.enc-rpool.allowDiscards = true;
boot.initrd.luks.devices.enc-tank.allowDiscards = true;
}

View file

@ -0,0 +1,27 @@
# Configuration shared by all guests on sire: wireguard proxy membership
# plus monitoring (promtail for logs, telegraf for metrics).
{
config,
lib,
nodes,
...
}: let
sentinelCfg = nodes.sentinel.config;
in {
# Join the sentinel wireguard proxy network with default settings.
meta.wireguard-proxy.sentinel = {};
meta.promtail = {
enable = true;
proxy = "sentinel";
};
# Connect safely via wireguard to skip http authentication
networking.hosts.${sentinelCfg.meta.wireguard.proxy-sentinel.ipv4} = [sentinelCfg.networking.providedDomains.influxdb];
# Telegraf is only enabled for non-container guests.
meta.telegraf = lib.mkIf (!config.boot.isContainer) {
enable = true;
# No hardware sensors are exposed to a guest — presumably why
# sensor scraping is off; confirm if containers/microvms differ.
scrapeSensors = false;
influxdb2 = {
domain = sentinelCfg.networking.providedDomains.influxdb;
organization = "machines";
bucket = "telegraf";
# NOTE(review): "ward-influxdb" as the reporting node looks copy-pasted
# from the ward host — confirm this shouldn't be sire's influxdb guest.
node = "ward-influxdb";
};
};
}

View file

@ -0,0 +1,161 @@
# Grafana guest: dashboards over the machines' telegraf metrics (InfluxDB)
# and logs (Loki), authenticated exclusively via Kanidm OAuth2 and exposed
# through sentinel's nginx.
{
  config,
  nodes,
  ...
}: let
  sentinelCfg = nodes.sentinel.config;
  grafanaDomain = "grafana.${sentinelCfg.repo.secrets.local.personalDomain}";
in {
  # Expose grafana's http port to sentinel over the wireguard proxy.
  meta.wireguard-proxy.sentinel.allowedTCPPorts = [config.services.grafana.settings.server.http_port];

  age.secrets.grafana-secret-key = {
    rekeyFile = config.node.secretsDir + "/grafana-secret-key.age";
    mode = "440";
    group = "grafana";
  };

  age.secrets.grafana-loki-basic-auth-password = {
    generator.script = "alnum";
    mode = "440";
    group = "grafana";
  };

  age.secrets.grafana-influxdb-token = {
    generator.script = "alnum";
    generator.tags = ["influxdb"];
    mode = "440";
    group = "grafana";
  };

  # Mirror the original oauth2 secret
  age.secrets.grafana-oauth2-client-secret = {
    inherit (nodes.ward-kanidm.config.age.secrets.kanidm-oauth2-grafana) rekeyFile;
    mode = "440";
    group = "grafana";
  };

  nodes.ward-influxdb = {
    # Mirror the original secret on the influx host
    age.secrets."grafana-influxdb-token-${config.node.name}" = {
      inherit (config.age.secrets.grafana-influxdb-token) rekeyFile;
      mode = "440";
      group = "influxdb2";
    };

    # Provision a token on the influx host that can read/write the
    # machines:telegraf bucket on grafana's behalf.
    services.influxdb2.provision.organizations.machines.auths."grafana machines:telegraf (${config.node.name})" = {
      readBuckets = ["telegraf"];
      writeBuckets = ["telegraf"];
      tokenFile = nodes.ward-influxdb.config.age.secrets."grafana-influxdb-token-${config.node.name}".path;
    };
  };

  nodes.sentinel = {
    # Regenerate the loki htpasswd file whenever our password changes.
    age.secrets.loki-basic-auth-hashes.generator.dependencies = [
      config.age.secrets.grafana-loki-basic-auth-password
    ];

    networking.providedDomains.grafana = grafanaDomain;

    services.nginx = {
      upstreams.grafana = {
        servers."${config.meta.wireguard.proxy-sentinel.ipv4}:${toString config.services.grafana.settings.server.http_port}" = {};
        extraConfig = ''
          zone grafana 64k;
          keepalive 2;
        '';
      };
      virtualHosts.${grafanaDomain} = {
        forceSSL = true;
        useACMEWildcardHost = true;
        locations."/" = {
          proxyPass = "http://grafana";
          proxyWebsockets = true;
        };
      };
    };
  };

  environment.persistence."/persist".directories = [
    {
      directory = config.services.grafana.dataDir;
      user = "grafana";
      group = "grafana";
      mode = "0700";
    }
  ];

  services.grafana = {
    enable = true;
    settings = {
      analytics.reporting_enabled = false;
      users.allow_sign_up = false;

      server = {
        domain = grafanaDomain;
        root_url = "https://${grafanaDomain}";
        enforce_domain = true;
        enable_gzip = true;
        # Bind on all interfaces so sentinel can reach us via wireguard.
        http_addr = "0.0.0.0";
        http_port = 3001;
      };

      security = {
        disable_initial_admin_creation = true;
        secret_key = "$__file{${config.age.secrets.grafana-secret-key.path}}";
        cookie_secure = true;
        disable_gravatar = true;
        hide_version = true;
      };

      # Login exclusively via Kanidm oauth2 — no local login form.
      auth.disable_login_form = true;
      "auth.generic_oauth" = {
        enabled = true;
        name = "Kanidm";
        icon = "signin";
        allow_sign_up = true;
        #auto_login = true;
        client_id = "grafana";
        client_secret = "$__file{${config.age.secrets.grafana-oauth2-client-secret.path}}";
        scopes = "openid email profile";
        # FIX: the standard OIDC claim is "preferred_username";
        # "prefered_username" (single r) is never emitted and would leave
        # the login attribute unresolved.
        login_attribute_path = "preferred_username";
        auth_url = "https://${sentinelCfg.networking.providedDomains.kanidm}/ui/oauth2";
        token_url = "https://${sentinelCfg.networking.providedDomains.kanidm}/oauth2/token";
        api_url = "https://${sentinelCfg.networking.providedDomains.kanidm}/oauth2/openid/grafana/userinfo";
        use_pkce = true;
        # Allow mapping oauth2 roles to server admin
        allow_assign_grafana_admin = true;
        role_attribute_path = "contains(scopes[*], 'server_admin') && 'GrafanaAdmin' || contains(scopes[*], 'admin') && 'Admin' || contains(scopes[*], 'editor') && 'Editor' || 'Viewer'";
      };
    };

    provision = {
      enable = true;
      datasources.settings.datasources = [
        {
          name = "InfluxDB (machines)";
          type = "influxdb";
          access = "proxy";
          url = "https://${sentinelCfg.networking.providedDomains.influxdb}";
          orgId = 1;
          secureJsonData.token = "$__file{${config.age.secrets.grafana-influxdb-token.path}}";
          jsonData.version = "Flux";
          jsonData.organization = "machines";
          jsonData.defaultBucket = "telegraf";
        }
        # TODO duplicate above influxdb source (with scoped read tokens??) for each organization
        {
          name = "Loki";
          type = "loki";
          access = "proxy";
          url = "https://${sentinelCfg.networking.providedDomains.loki}";
          orgId = 1;
          basicAuth = true;
          basicAuthUser = "${config.node.name}+grafana-loki-basic-auth-password";
          secureJsonData.basicAuthPassword = "$__file{${config.age.secrets.grafana-loki-basic-auth-password.path}}";
        }
      ];
    };
  };

  systemd.services.grafana.serviceConfig.RestartSec = "600"; # Retry every 10 minutes
}

View file

@ -0,0 +1,40 @@
# Immich guest (photo management), exposed through sentinel's nginx behind
# oauth2 group-based access control.
{
  config,
  nodes,
  ...
}: let
  sentinelCfg = nodes.sentinel.config;
  immichDomain = "immich.${sentinelCfg.repo.secrets.local.personalDomain}";
in {
  # NOTE(review): the firewall opens `web_port`, but the nginx upstream
  # below connects to `settings.bind_port` — if these differ, sentinel
  # cannot reach immich. Confirm which option the immich module serves on.
  meta.wireguard-proxy.sentinel.allowedTCPPorts = [config.services.immich.web_port];

  nodes.sentinel = {
    networking.providedDomains.immich = immichDomain;

    services.nginx = {
      upstreams.immich = {
        servers."${config.meta.wireguard.proxy-sentinel.ipv4}:${toString config.services.immich.settings.bind_port}" = {};
        extraConfig = ''
          zone immich 64k;
          keepalive 2;
        '';
      };
      virtualHosts.${immichDomain} = {
        forceSSL = true;
        useACMEWildcardHost = true;
        # Require oauth2 login and membership in access_immich.
        oauth2.enable = true;
        oauth2.allowedGroups = ["access_immich"];
        locations."/" = {
          proxyPass = "http://immich";
          proxyWebsockets = true;
        };
      };
    };
  };

  services.immich = {
    enable = true;
  };

  # FIX: this previously set RestartSec on the *grafana* unit (copy-paste
  # from grafana.nix); it must apply to immich's own unit.
  # NOTE(review): assumes the module's unit is named "immich" — adjust if
  # it uses a different unit name.
  systemd.services.immich.serviceConfig.RestartSec = "600"; # Retry every 10 minutes
}

View file

@ -0,0 +1,101 @@
# InfluxDB2 guest: central metrics database for all machines' telegraf
# agents, exposed through sentinel's nginx behind oauth2 with an IP-based
# bypass for wireguard peers.
{
  config,
  lib,
  nodes,
  pkgs,
  ...
}: let
  sentinelCfg = nodes.sentinel.config;
  influxdbDomain = "influxdb.${sentinelCfg.repo.secrets.local.personalDomain}";
  influxdbPort = 8086;
in {
  meta.wireguard-proxy.sentinel.allowedTCPPorts = [influxdbPort];

  nodes.sentinel = {
    networking.providedDomains.influxdb = influxdbDomain;

    services.nginx = {
      upstreams.influxdb = {
        servers."${config.meta.wireguard.proxy-sentinel.ipv4}:${toString influxdbPort}" = {};
        extraConfig = ''
          zone influxdb 64k;
          keepalive 2;
        '';
      };
      virtualHosts.${influxdbDomain} = let
        # `satisfy any` + allow-list: requests from wireguard peers skip
        # the oauth2 check; everyone else must authenticate.
        accessRules = ''
          satisfy any;
          ${lib.concatMapStrings (ip: "allow ${ip};\n") sentinelCfg.meta.wireguard.proxy-sentinel.server.reservedAddresses}
          deny all;
        '';
      in {
        forceSSL = true;
        useACMEWildcardHost = true;
        oauth2.enable = true;
        oauth2.allowedGroups = ["access_influxdb"];
        locations."/" = {
          proxyPass = "http://influxdb";
          proxyWebsockets = true;
          extraConfig = accessRules;
        };
        # Separate location for the write endpoint so frequent telegraf
        # pushes are not access-logged.
        locations."/api/v2/write" = {
          proxyPass = "http://influxdb/api/v2/write";
          proxyWebsockets = true;
          extraConfig = ''
            ${accessRules}
            access_log off;
          '';
        };
      };
    };
  };

  age.secrets.influxdb-admin-password = {
    generator.script = "alnum";
    mode = "440";
    group = "influxdb2";
  };

  age.secrets.influxdb-admin-token = {
    generator.script = "alnum";
    mode = "440";
    group = "influxdb2";
  };

  age.secrets.influxdb-user-telegraf-token = {
    generator.script = "alnum";
    mode = "440";
    group = "influxdb2";
  };

  environment.persistence."/persist".directories = [
    {
      directory = "/var/lib/influxdb2";
      user = "influxdb2";
      group = "influxdb2";
      mode = "0700";
    }
  ];

  services.influxdb2 = {
    enable = true;
    settings = {
      reporting-disabled = true;
      # Bind on all interfaces so sentinel can reach us via wireguard.
      http-bind-address = "0.0.0.0:${toString influxdbPort}";
    };
    provision = {
      enable = true;
      initialSetup = {
        organization = "default";
        bucket = "default";
        passwordFile = config.age.secrets.influxdb-admin-password.path;
        tokenFile = config.age.secrets.influxdb-admin-token.path;
      };
      organizations.machines.buckets.telegraf = {};
    };
  };

  environment.systemPackages = [pkgs.influxdb2-cli];

  # FIX: this previously set RestartSec on the *grafana* unit (copy-paste
  # from grafana.nix); the service running here is influxdb2.
  systemd.services.influxdb2.serviceConfig.RestartSec = "600"; # Retry every 10 minutes
}

135
hosts/sire/guests/loki.nix Normal file
View file

@ -0,0 +1,135 @@
# Loki guest: log aggregation for all hosts' promtail instances,
# exposed through sentinel's nginx behind basic auth.
{
config,
nodes,
...
}: let
sentinelCfg = nodes.sentinel.config;
lokiDomain = "loki.${sentinelCfg.repo.secrets.local.personalDomain}";
in {
meta.wireguard-proxy.sentinel.allowedTCPPorts = [config.services.loki.configuration.server.http_listen_port];
nodes.sentinel = {
networking.providedDomains.loki = lokiDomain;
# htpasswd-style file aggregating the basic-auth credentials of all
# clients allowed to push to / query loki.
age.secrets.loki-basic-auth-hashes = {
generator.script = "basic-auth";
mode = "440";
group = "nginx";
};
services.nginx = {
upstreams.loki = {
servers."${config.meta.wireguard.proxy-sentinel.ipv4}:${toString config.services.loki.configuration.server.http_listen_port}" = {};
extraConfig = ''
zone loki 64k;
keepalive 2;
'';
};
virtualHosts.${lokiDomain} = {
forceSSL = true;
useACMEWildcardHost = true;
locations."/" = {
proxyPass = "http://loki";
proxyWebsockets = true;
extraConfig = ''
auth_basic "Authentication required";
auth_basic_user_file ${sentinelCfg.age.secrets.loki-basic-auth-hashes.path};
proxy_read_timeout 1800s;
proxy_connect_timeout 1600s;
access_log off;
'';
};
# Health endpoint stays reachable without credentials.
locations."= /ready" = {
proxyPass = "http://loki";
extraConfig = ''
auth_basic off;
access_log off;
'';
};
};
};
};
environment.persistence."/persist".directories = [
{
directory = "/var/lib/loki";
user = "loki";
group = "loki";
mode = "0700";
}
];
services.loki = let
lokiDir = "/var/lib/loki";
in {
enable = true;
configuration = {
analytics.reporting_enabled = false;
# Single-tenant setup; authentication is handled by nginx instead.
auth_enabled = false;
server = {
# Bind on all interfaces so sentinel can reach us via wireguard.
http_listen_address = "0.0.0.0";
http_listen_port = 3100;
log_level = "warn";
};
# Single-instance ingester: in-memory ring with one replica.
ingester = {
lifecycler = {
address = "127.0.0.1";
ring = {
kvstore.store = "inmemory";
replication_factor = 1;
};
final_sleep = "0s";
};
chunk_idle_period = "5m";
chunk_retain_period = "30s";
};
schema_config.configs = [
{
from = "2023-06-01";
store = "tsdb";
object_store = "filesystem";
schema = "v12";
index = {
prefix = "index_";
period = "24h";
};
}
];
storage_config = {
tsdb_shipper = {
active_index_directory = "${lokiDir}/tsdb-index";
cache_location = "${lokiDir}/tsdb-cache";
cache_ttl = "24h";
shared_store = "filesystem";
};
filesystem.directory = "${lokiDir}/chunks";
};
# Do not accept new logs that are ingressed when they are actually already old.
limits_config = {
reject_old_samples = true;
reject_old_samples_max_age = "168h";
};
# Do not delete old logs automatically
table_manager = {
retention_deletes_enabled = false;
retention_period = "0s";
};
compactor = {
working_directory = lokiDir;
shared_store = "filesystem";
compactor_ring.kvstore.store = "inmemory";
};
};
};
systemd.services.loki.serviceConfig.RestartSec = "600"; # Retry every 10 minutes
}

View file

@ -0,0 +1,77 @@
# Paperless guest: document management system, exposed through
# sentinel's nginx.
{
  config,
  nodes,
  ...
}: let
  sentinelCfg = nodes.sentinel.config;
  paperlessDomain = "paperless.${sentinelCfg.repo.secrets.local.personalDomain}";
in {
  # XXX: remove microvm.mem = 1024 * 12;
  # XXX: remove microvm.vcpu = 4;
  meta.wireguard-proxy.sentinel.allowedTCPPorts = [
    config.services.paperless.port
  ];

  age.secrets.paperless-admin-password = {
    rekeyFile = config.node.secretsDir + "/paperless-admin-password.age";
    generator.script = "alnum";
    mode = "440";
    group = "paperless";
  };

  nodes.sentinel = {
    networking.providedDomains.paperless = paperlessDomain;

    services.nginx = {
      upstreams.paperless = {
        servers."${config.meta.wireguard.proxy-sentinel.ipv4}:${toString config.services.paperless.port}" = {};
        extraConfig = ''
          zone paperless 64k;
          keepalive 2;
        '';
      };
      virtualHosts.${paperlessDomain} = {
        forceSSL = true;
        useACMEWildcardHost = true;
        # Allow large document uploads.
        extraConfig = ''
          client_max_body_size 512M;
        '';
        locations."/" = {
          proxyPass = "http://paperless";
          proxyWebsockets = true;
          # FIX: `X-Frame-Options` is not a valid option of the nginx
          # module's location submodule and would fail evaluation; emit
          # the header through extraConfig instead.
          extraConfig = ''
            add_header X-Frame-Options "SAMEORIGIN";
          '';
        };
      };
    };
  };

  # TODO environment.persistence."/persist".directories = [
  # TODO {
  # TODO directory = "/var/lib/???";
  # TODO user = "???";
  # TODO group = "???";
  # TODO mode = "0700";
  # TODO }
  # TODO ];

  services.paperless = {
    enable = true;
    # Bind on all interfaces so sentinel can reach us via wireguard.
    address = "0.0.0.0";
    passwordFile = config.age.secrets.paperless-admin-password.path;
    extraConfig = {
      PAPERLESS_URL = "https://${paperlessDomain}";
      # Detect document-separation and ASN barcodes during consumption.
      PAPERLESS_CONSUMER_ENABLE_BARCODES = true;
      PAPERLESS_CONSUMER_ENABLE_ASN_BARCODE = true;
      PAPERLESS_CONSUMER_BARCODE_SCANNER = "ZXING";
      PAPERLESS_FILENAME_FORMAT = "{created_year}-{created_month}-{created_day}_{asn}_{title}";
      #PAPERLESS_IGNORE_DATES = concatStringsSep "," ignoreDates;
      PAPERLESS_NUMBER_OF_SUGGESTED_DATES = 4;
      PAPERLESS_OCR_LANGUAGE = "deu+eng";
      PAPERLESS_TASK_WORKERS = 4;
      PAPERLESS_WEBSERVER_WORKERS = 4;
    };
  };

  systemd.services.paperless.serviceConfig.RestartSec = "600"; # Retry every 10 minutes
}

123
hosts/sire/guests/samba.nix Normal file
View file

@ -0,0 +1,123 @@
# Samba guest: SMB3-only file server with per-user and per-group shares
# under /shares. Accounts are provisioned from the local secrets;
# passwords come from a pre-generated tdbsam database.
{
config,
lib,
...
}: let
smbUsers = config.repo.secrets.local.samba.users;
smbGroups = config.repo.secrets.local.samba.groups;
in {
# Encrypted samba password database (mode 600: owner-only access).
age.secrets."samba-passdb.tdb" = {
rekeyFile = config.node.secretsDir + "/samba-passdb.tdb.age";
mode = "600";
};
services.samba = {
enable = true;
openFirewall = true;
# Disable Samba's nmbd, because we don't want to reply to NetBIOS over IP
# requests, since all of our clients hardcode the server shares.
enableNmbd = false;
# Disable Samba's winbindd, which provides a number of services to the Name
# Service Switch capability found in most modern C libraries, to arbitrary
# applications via PAM and ntlm_auth and to Samba itself.
enableWinbindd = false;
extraConfig = lib.concatLines [
# Show the server host name in the printer comment box in print manager
# and next to the IPC connection in net view.
"server string = my-nas"
# Set the NetBIOS name by which the Samba server is known.
"netbios name = my-nas"
# Disable netbios support. We don't need to support browsing since all
# clients hardcode the host and share names.
"disable netbios = yes"
# Deny access to all hosts by default.
"hosts deny = 0.0.0.0/0"
# Allow access to local network and TODO: wireguard
"hosts allow = 192.168.1.0/22 192.168.100.0/24"
# Set sane logging options
"log level = 0 auth:2 passdb:2"
"log file = /dev/null"
"max log size = 0"
"logging = systemd"
# TODO: allow based on wireguard ip without username and password
# Users always have to login with an account and are never mapped
# to a guest account.
"passdb backend = tdbsam:${config.age.secrets."samba-passdb.tdb".path}"
"server role = standalone"
"guest account = nobody"
"map to guest = never"
# Clients should only connect using the latest SMB3 protocol (e.g., on
# clients running Windows 8 and later).
"server min protocol = SMB3_11"
# Require native SMB transport encryption by default.
"server smb encrypt = required"
# Disable printer sharing. By default Samba shares printers configured
# using CUPS.
"load printers = no"
"printing = bsd"
"printcap name = /dev/null"
"disable spoolss = yes"
"show add printer wizard = no"
# Load in modules (order is critical!) and enable AAPL extensions.
"vfs objects = catia fruit streams_xattr"
# Enable Apple's SMB2+ extension.
"fruit:aapl = yes"
# Clean up unused or empty files created by the OS or Samba.
"fruit:wipe_intentionally_left_blank_rfork = yes"
"fruit:delete_empty_adfiles = yes"
];
shares = let
# Base share settings: authenticated, writable, group-friendly
# permission masks; `cfg` overrides or extends these defaults.
mkShare = path: cfg:
{
inherit path;
public = "no";
writable = "yes";
"create mask" = "0770";
"directory mask" = "0770";
# "force create mode" = "0660";
# "force directory mode" = "0770";
#"acl allow execute always" = "yes";
}
// cfg;
# Group shares are restricted to group members and force ownership
# to the shared "family" user and the group itself.
mkGroupShare = group:
mkShare "/shares/groups/${group}" {
"valid users" = "@${group}";
"force user" = "family";
"force group" = group;
};
# User shares are only accessible by that single user.
mkUserShare = user:
mkShare "/shares/users/${user}" {
"valid users" = user;
};
in
{}
// lib.mapAttrs (name: _: mkUserShare name) smbUsers
// lib.mapAttrs (name: _: mkGroupShare name) smbGroups;
};
# Provision a locked-down system account per smb user: no home, no
# shell, no subuid/subgid ranges.
users.users = let
mkUser = name: id: groups: {
isNormalUser = true;
uid = id;
group = name;
extraGroups = groups;
createHome = false;
home = "/var/empty";
useDefaultShell = false;
autoSubUidGidRange = false;
};
in
{}
// lib.mapAttrs (name: cfg: mkUser name cfg.id cfg.groups) smbUsers
# NOTE(review): a user account is also created for each *group*
# (same id, no extra groups) — confirm this is intentional.
// lib.mapAttrs (name: cfg: mkUser name cfg.id []) smbGroups;
users.groups = lib.mapAttrs (_: cfg: {gid = cfg.id;}) (smbUsers // smbGroups);
}

66
hosts/sire/net.nix Normal file
View file

@ -0,0 +1,66 @@
# Network configuration for sire: static LAN address on a macvlan that
# bridges to the same physical interface the guests' macvtaps attach to.
{config, ...}: let
  lanMac = config.repo.secrets.local.networking.interfaces.lan.mac;
in {
  networking.hostId = config.repo.secrets.local.networking.hostId;

  # Early-boot networking: bring up the LAN with a static address inside
  # the initrd (used together with initrd-ssh for remote unlock).
  boot.initrd.systemd.network = {
    enable = true;
    networks."10-lan" = {
      address = ["192.168.1.2"];
      matchConfig.MACAddress = lanMac;
      networkConfig = {
        IPv6PrivacyExtensions = "yes";
        MulticastDNS = true;
      };
      linkConfig.RequiredForOnline = "routable";
    };
  };

  systemd.network = {
    # Create a MACVTAP for ourselves too, so that we can communicate with
    # our guests on the same interface.
    netdevs."10-lan-self" = {
      netdevConfig = {
        Name = "lan-self";
        Kind = "macvlan";
      };
      extraConfig = ''
        [MACVLAN]
        Mode=bridge
      '';
    };

    networks = {
      "10-lan" = {
        matchConfig.MACAddress = lanMac;
        # This interface should only be used from attached macvtaps.
        # So don't acquire a link local address and only wait for
        # this interface to gain a carrier.
        networkConfig.LinkLocalAddressing = "no";
        linkConfig.RequiredForOnline = "carrier";
        extraConfig = ''
          [Network]
          MACVLAN=lan-self
        '';
      };

      "20-lan-self" = {
        address = ["192.168.1.2"];
        matchConfig.Name = "lan-self";
        networkConfig = {
          IPv6PrivacyExtensions = "yes";
          MulticastDNS = true;
        };
        linkConfig.RequiredForOnline = "routable";
      };

      # Remaining macvtap interfaces should not be touched.
      "90-macvtap-ignore" = {
        matchConfig.Kind = "macvtap";
        linkConfig.ActivationPolicy = "manual";
        linkConfig.Unmanaged = "yes";
      };
    };
  };

  networking.nftables.firewall.zones.untrusted.interfaces = ["lan-self"];

  # Allow accessing influx
  meta.wireguard.proxy-sentinel.client.via = "sentinel";
}

View file

@ -0,0 +1,10 @@
age-encryption.org/v1
-> X25519 S365Ptmx5jGBBvN7q/nxHZWLT4wsHYey5TSIvqfKqXs
MODSBeb8Kt0CfFdTgPskMFVaen28O5N5ql7aqxJ+YaQ
-> piv-p256 xqSe8Q A8G1Ljc2V/ay90ZiITuXGDxRaH5R/QqDsSpXbsYQFFjx
nE6ODZqg4QAujfWOeTRD/S0m/8bRadTqSCQa5sVIJ3w
-> <*^9;-grease X4qEn "qK,G4} 5Gp'jn!Q
bU3aA07kpeHbqAoFMrp4mWj3/iPH67VZpE+mW2Z9huxze+Jn1js0p/hV2fj2jlWm
/DZP
--- vSYl/yA0H1WBqkDI+lu8o1+/l7pOt5wFwb2cLuCDWFQ
¤YÊBç'íŽ;HIët%‹¼?{e8ÞÀïV€B«QýFÌ»‡eí>ìmæ(øõG†Âì̸„9ßͺȘ"‘Lû2zA~O€F³jsÂ"¸

View file

@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINIoRbU/YRiGKC0mXggTEvjxzR9Wno3Z4dLL0rzryvOn

View file

@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFMbumSAJX2MOuQO+5tFoL52Fe+TRD2dZwzNCGilACQ/

View file

@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOanBR6abVoPfpH9RyhxaJ1dg0/+VFAUyCfQzFqxGBzk

Binary file not shown.

View file

@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOeNwlldS5+lZvIgBpeoTOKj26zzG+LeSZYEvJzbCBHY

View file

@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM7FT4AvwoDetiXFiYA0l/B03ThuNrOnPsCxDdpzLXXZ

View file

@ -0,0 +1,9 @@
age-encryption.org/v1
-> X25519 T+p8DC+r5eXbafinXz0AuqaDgyTXzVEk75YCzbzPORg
AocHJ7AtX2NWN7PeLjc6tbaYKW6p793vajC+eBAtA2k
-> piv-p256 xqSe8Q A5oLMFDESd7+zHU0i/DXaiFC/G8OWgW2y8boYRR5NUQ1
qcIQJlkPhS/ARwzV6ajvnefELmxI4/a6kXnJyjryq5I
-> +8Z-grease o*-Th)vX %TAq
nQRpWbLvit6lC0NV/sZk
--- p4feRTSXzE66RtPi9F/vxSxJv1tlcnYa7OFnt0FyDeI
vh³ ºa«ç9/YýU¹¶œþã¼S}üZ& 'Yõ7Y´=K†L,»HWç‹tŸ¨…�¤ïé1º„h¦æf'£š±M÷ðßpÿ{E ×£,«d™4

View file

@ -0,0 +1 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIMV+QsCngZ1k6Ta5pqz2wRHsiSlEwlfl7pgSRfHzF5Y

View file

@ -0,0 +1,12 @@
age-encryption.org/v1
-> X25519 fKbik0Nwn3w0RFtyYjRx3NIRR6p1ePjwN1rQeQUKnC0
FESp5Xwwuu3hifwpoalYD75/g994HsDJb6a7lasAH98
-> piv-p256 xqSe8Q A/f8+j/94A2oU2/SynYRewGBZbPWy1rGU5pnUPksXkwH
n+KeTBbXvjCu9GZypD8Vmz2uuN1XaZpDfX40TNk74js
-> *:l-grease D8U!RlB wkBn7Zl4
PLWQ+OcE+p/gZ9AaOl5RmO8C5IO5rQD3GIazmdWs/ImIbPFgSY7NM+Tb4j/qrQez
--- 2ucK0s28/BTrnfxnm0vOvqsmOXLXBEnsxHMRHYUyLHo
¼b˜à¹oѯVo}¼å]3Kпâppú\­ÉYiæ}:FH÷Ó^ÉU°>ÚRÿô¿eM`0Î+îíÕ¯·±ÞÜÓ놪…Œ1¡50:F‚Y2M“^[u�ÇáZMy;„ký]z8û÷a~MæÔŸÿ1­cô/™óU¦3)–r–è¢Ç–Uõ>•÷˜‘ºóx?ý6xò¤6`!R_ψ¦�»’éæŒ¦£á·Žòû÷&ž(.«{x•›? rëhåÙêÂB}̨Në°#Œ–¿g[•õù2aR¯­lRØT§Ï£æ9W“”Û ]ŸÇ£ IŽ›œ26¼¨¨lô?íµäô·áÆ
~ÑXßµ½„”O·…φ#‚!àø.�‰�*äĤmjh*C˜¨¨}­{!¸ µ›
Ã&ÒN¿Ðm#vEFbË–3C´d\}·ajRÆ[…È[Ñ+ïp2%ÜãÊÈ­†óÀ/|5³þ(øÂ-à�žÝîa¹°dÝÔ_@Éà…g¬|.Á…o¦+à[œVÇ`‹tP©²¼

Binary file not shown.