mirror of https://github.com/oddlama/nix-config.git
synced 2025-10-11 07:10:39 +02:00

feat(sire): init host: main media storage server

This commit is contained in:
parent 36cb1d31cb
commit 1a96a4b8df

33 changed files with 364 additions and 69 deletions
@@ -22,7 +22,7 @@
   ];

   boot.mode = "efi";
-  boot.initrd.availableKernelModules = ["xhci_pci" "ahci" "nvme" "usbhid" "usb_storage" "sd_mod" "sdhci_pci" "r8169"];
+  boot.initrd.availableKernelModules = ["xhci_pci" "ahci" "nvme" "usbhid" "usb_storage" "e1000e" "alx"];

   meta.promtail = {
     enable = true;
@@ -76,6 +76,7 @@
     };
   };

+  # deadnix: skip
   mkContainer = guestName: {
     ${guestName} =
       mkGuest guestName
@@ -88,29 +89,9 @@
     lib.mkIf (!minimal) (
       {}
       // mkMicrovm "adguardhome"
       // mkMicrovm "samba"
-      // mkContainer "forgejo"
-      // mkContainer "grafana"
-      // mkContainer "influxdb"
-      // mkContainer "kanidm"
-      // mkContainer "loki"
-      // mkContainer "paperless"
-      // mkContainer "radicale"
-      // mkContainer "vaultwarden"
+      // mkMicrovm "forgejo"
+      // mkMicrovm "kanidm"
+      // mkMicrovm "radicale"
+      // mkMicrovm "vaultwarden"
     );
-
-  #ddclient = defineVm;
-  #samba+wsdd = defineVm;
-  #fasten-health = defineVm;
-  #immich = defineVm;
-  #paperless = defineVm;
-  #radicale = defineVm;
-  #minecraft = defineVm;
-  #firefly
-
-  #maddy = defineVm;
-  #anonaddy = defineVm;
-
-  #automatic1111 = defineVm;
-  #invokeai = defineVm;
-}
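A note on the hunk above: the guest set is assembled by merging one single-key attrset per guest with the // update operator. A minimal sketch of the pattern, with mkMicrovm' and mkContainer' as hypothetical stand-ins for the repo's real helpers (which attach full microvm/container configuration):

    # Sketch only: illustrative stand-ins, not the repo's actual helpers.
    let
      mkMicrovm' = guestName: {${guestName} = {backend = "microvm";};};
      mkContainer' = guestName: {${guestName} = {backend = "container";};};
    in
      {}
      // mkMicrovm' "adguardhome"
      // mkContainer' "forgejo"
    # => { adguardhome.backend = "microvm"; forgejo.backend = "container"; }

Because // is right-biased, defining the same guest twice would silently keep only the later definition, which is why each helper emits a unique key.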
@@ -20,15 +20,13 @@
       };
     };
-    zpool = with lib.disko.zfs; {
-      rpool =
-        defaultZpoolOptions
-        // {
-          datasets =
-            defaultZfsDatasets
-            // {
-              "safe/guests" = unmountable;
-            };
-        };
+    rpool = mkZpool {
+      datasets =
+        impermanenceZfsDatasets
+        // {
+          "safe/guests" = unmountable;
+        };
+    };
     };
   };
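The same // idiom drives the disko change above: a shared dataset layout (impermanenceZfsDatasets) is extended with the host-specific "safe/guests" dataset. Assuming mkZpool is simply defaults-plus-overrides (its real definition lives elsewhere in this repo), the shape is roughly:

    # Hypothetical sketch of the helper; option values are illustrative.
    let
      defaultZpoolOptions = {
        type = "zpool";
        options.ashift = "12";
      };
      mkZpool = args: defaultZpoolOptions // args;
    in
      mkZpool {
        datasets = {
          "local/root" = {};  # stand-in for impermanenceZfsDatasets
          "safe/guests" = {}; # stand-in for unmountable
        };
      }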
@@ -1,161 +0,0 @@
{
  config,
  nodes,
  ...
}: let
  sentinelCfg = nodes.sentinel.config;
  grafanaDomain = "grafana.${sentinelCfg.repo.secrets.local.personalDomain}";
in {
  meta.wireguard-proxy.sentinel.allowedTCPPorts = [config.services.grafana.settings.server.http_port];

  age.secrets.grafana-secret-key = {
    rekeyFile = config.node.secretsDir + "/grafana-secret-key.age";
    mode = "440";
    group = "grafana";
  };

  age.secrets.grafana-loki-basic-auth-password = {
    generator.script = "alnum";
    mode = "440";
    group = "grafana";
  };

  age.secrets.grafana-influxdb-token = {
    generator.script = "alnum";
    generator.tags = ["influxdb"];
    mode = "440";
    group = "grafana";
  };

  # Mirror the original oauth2 secret
  age.secrets.grafana-oauth2-client-secret = {
    inherit (nodes.ward-kanidm.config.age.secrets.kanidm-oauth2-grafana) rekeyFile;
    mode = "440";
    group = "grafana";
  };

  nodes.ward-influxdb = {
    # Mirror the original secret on the influx host
    age.secrets."grafana-influxdb-token-${config.node.name}" = {
      inherit (config.age.secrets.grafana-influxdb-token) rekeyFile;
      mode = "440";
      group = "influxdb2";
    };

    services.influxdb2.provision.organizations.machines.auths."grafana machines:telegraf (${config.node.name})" = {
      readBuckets = ["telegraf"];
      writeBuckets = ["telegraf"];
      tokenFile = nodes.ward-influxdb.config.age.secrets."grafana-influxdb-token-${config.node.name}".path;
    };
  };

  nodes.sentinel = {
    age.secrets.loki-basic-auth-hashes.generator.dependencies = [
      config.age.secrets.grafana-loki-basic-auth-password
    ];

    networking.providedDomains.grafana = grafanaDomain;

    services.nginx = {
      upstreams.grafana = {
        servers."${config.meta.wireguard.proxy-sentinel.ipv4}:${toString config.services.grafana.settings.server.http_port}" = {};
        extraConfig = ''
          zone grafana 64k;
          keepalive 2;
        '';
      };
      virtualHosts.${grafanaDomain} = {
        forceSSL = true;
        useACMEWildcardHost = true;
        locations."/" = {
          proxyPass = "http://grafana";
          proxyWebsockets = true;
        };
      };
    };
  };

  environment.persistence."/persist".directories = [
    {
      directory = config.services.grafana.dataDir;
      user = "grafana";
      group = "grafana";
      mode = "0700";
    }
  ];

  services.grafana = {
    enable = true;
    settings = {
      analytics.reporting_enabled = false;
      users.allow_sign_up = false;

      server = {
        domain = grafanaDomain;
        root_url = "https://${grafanaDomain}";
        enforce_domain = true;
        enable_gzip = true;
        http_addr = "0.0.0.0";
        http_port = 3001;
      };

      security = {
        disable_initial_admin_creation = true;
        secret_key = "$__file{${config.age.secrets.grafana-secret-key.path}}";
        cookie_secure = true;
        disable_gravatar = true;
        hide_version = true;
      };

      auth.disable_login_form = true;
      "auth.generic_oauth" = {
        enabled = true;
        name = "Kanidm";
        icon = "signin";
        allow_sign_up = true;
        #auto_login = true;
        client_id = "grafana";
        client_secret = "$__file{${config.age.secrets.grafana-oauth2-client-secret.path}}";
        scopes = "openid email profile";
        login_attribute_path = "prefered_username";
        auth_url = "https://${sentinelCfg.networking.providedDomains.kanidm}/ui/oauth2";
        token_url = "https://${sentinelCfg.networking.providedDomains.kanidm}/oauth2/token";
        api_url = "https://${sentinelCfg.networking.providedDomains.kanidm}/oauth2/openid/grafana/userinfo";
        use_pkce = true;
        # Allow mapping oauth2 roles to server admin
        allow_assign_grafana_admin = true;
        role_attribute_path = "contains(scopes[*], 'server_admin') && 'GrafanaAdmin' || contains(scopes[*], 'admin') && 'Admin' || contains(scopes[*], 'editor') && 'Editor' || 'Viewer'";
      };
    };

    provision = {
      enable = true;
      datasources.settings.datasources = [
        {
          name = "InfluxDB (machines)";
          type = "influxdb";
          access = "proxy";
          url = "https://${sentinelCfg.networking.providedDomains.influxdb}";
          orgId = 1;
          secureJsonData.token = "$__file{${config.age.secrets.grafana-influxdb-token.path}}";
          jsonData.version = "Flux";
          jsonData.organization = "machines";
          jsonData.defaultBucket = "telegraf";
        }
        # TODO duplicate above influxdb source (with scoped read tokens??) for each organization
        {
          name = "Loki";
          type = "loki";
          access = "proxy";
          url = "https://${sentinelCfg.networking.providedDomains.loki}";
          orgId = 1;
          basicAuth = true;
          basicAuthUser = "${config.node.name}+grafana-loki-basic-auth-password";
          secureJsonData.basicAuthPassword = "$__file{${config.age.secrets.grafana-loki-basic-auth-password.path}}";
        }
      ];
    };
  };

  systemd.services.grafana.serviceConfig.RestartSec = "600"; # Retry every 10 minutes
}
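One detail worth keeping from the deleted Grafana module above: secret values never appear in settings directly. Grafana's file provider, $__file{<path>}, reads the value from disk at startup, so the agenix-managed secret stays out of the world-readable Nix store. The pattern, as used above:

    # Grafana resolves $__file{...} itself when parsing its configuration;
    # Nix only interpolates the on-disk path of the decrypted secret.
    { config, ... }: {
      services.grafana.settings.security.secret_key =
        "$__file{${config.age.secrets.grafana-secret-key.path}}";
    }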
@@ -1,40 +0,0 @@
{
  config,
  nodes,
  ...
}: let
  sentinelCfg = nodes.sentinel.config;
  immichDomain = "immich.${sentinelCfg.repo.secrets.local.personalDomain}";
in {
  meta.wireguard-proxy.sentinel.allowedTCPPorts = [config.services.immich.web_port];

  nodes.sentinel = {
    networking.providedDomains.immich = immichDomain;

    services.nginx = {
      upstreams.immich = {
        servers."${config.meta.wireguard.proxy-sentinel.ipv4}:${toString config.services.immich.settings.bind_port}" = {};
        extraConfig = ''
          zone immich 64k;
          keepalive 2;
        '';
      };
      virtualHosts.${immichDomain} = {
        forceSSL = true;
        useACMEWildcardHost = true;
        oauth2.enable = true;
        oauth2.allowedGroups = ["access_immich"];
        locations."/" = {
          proxyPass = "http://immich";
          proxyWebsockets = true;
        };
      };
    };
  };

  services.immich = {
    enable = true;
  };

  systemd.services.grafana.serviceConfig.RestartSec = "600"; # Retry every 10 minutes
}
@@ -1,101 +0,0 @@
{
  config,
  lib,
  nodes,
  pkgs,
  ...
}: let
  sentinelCfg = nodes.sentinel.config;
  influxdbDomain = "influxdb.${sentinelCfg.repo.secrets.local.personalDomain}";
  influxdbPort = 8086;
in {
  meta.wireguard-proxy.sentinel.allowedTCPPorts = [influxdbPort];

  nodes.sentinel = {
    networking.providedDomains.influxdb = influxdbDomain;

    services.nginx = {
      upstreams.influxdb = {
        servers."${config.meta.wireguard.proxy-sentinel.ipv4}:${toString influxdbPort}" = {};
        extraConfig = ''
          zone influxdb 64k;
          keepalive 2;
        '';
      };
      virtualHosts.${influxdbDomain} = let
        accessRules = ''
          satisfy any;
          ${lib.concatMapStrings (ip: "allow ${ip};\n") sentinelCfg.meta.wireguard.proxy-sentinel.server.reservedAddresses}
          deny all;
        '';
      in {
        forceSSL = true;
        useACMEWildcardHost = true;
        oauth2.enable = true;
        oauth2.allowedGroups = ["access_influxdb"];
        locations."/" = {
          proxyPass = "http://influxdb";
          proxyWebsockets = true;
          extraConfig = accessRules;
        };
        locations."/api/v2/write" = {
          proxyPass = "http://influxdb/api/v2/write";
          proxyWebsockets = true;
          extraConfig = ''
            ${accessRules}
            access_log off;
          '';
        };
      };
    };
  };

  age.secrets.influxdb-admin-password = {
    generator.script = "alnum";
    mode = "440";
    group = "influxdb2";
  };

  age.secrets.influxdb-admin-token = {
    generator.script = "alnum";
    mode = "440";
    group = "influxdb2";
  };

  age.secrets.influxdb-user-telegraf-token = {
    generator.script = "alnum";
    mode = "440";
    group = "influxdb2";
  };

  environment.persistence."/persist".directories = [
    {
      directory = "/var/lib/influxdb2";
      user = "influxdb2";
      group = "influxdb2";
      mode = "0700";
    }
  ];

  services.influxdb2 = {
    enable = true;
    settings = {
      reporting-disabled = true;
      http-bind-address = "0.0.0.0:${toString influxdbPort}";
    };
    provision = {
      enable = true;
      initialSetup = {
        organization = "default";
        bucket = "default";
        passwordFile = config.age.secrets.influxdb-admin-password.path;
        tokenFile = config.age.secrets.influxdb-admin-token.path;
      };
      organizations.machines.buckets.telegraf = {};
    };
  };

  environment.systemPackages = [pkgs.influxdb2-cli];

  systemd.services.grafana.serviceConfig.RestartSec = "600"; # Retry every 10 minutes
}
@@ -1,135 +0,0 @@
{
  config,
  nodes,
  ...
}: let
  sentinelCfg = nodes.sentinel.config;
  lokiDomain = "loki.${sentinelCfg.repo.secrets.local.personalDomain}";
in {
  meta.wireguard-proxy.sentinel.allowedTCPPorts = [config.services.loki.configuration.server.http_listen_port];

  nodes.sentinel = {
    networking.providedDomains.loki = lokiDomain;

    age.secrets.loki-basic-auth-hashes = {
      generator.script = "basic-auth";
      mode = "440";
      group = "nginx";
    };

    services.nginx = {
      upstreams.loki = {
        servers."${config.meta.wireguard.proxy-sentinel.ipv4}:${toString config.services.loki.configuration.server.http_listen_port}" = {};
        extraConfig = ''
          zone loki 64k;
          keepalive 2;
        '';
      };
      virtualHosts.${lokiDomain} = {
        forceSSL = true;
        useACMEWildcardHost = true;
        locations."/" = {
          proxyPass = "http://loki";
          proxyWebsockets = true;
          extraConfig = ''
            auth_basic "Authentication required";
            auth_basic_user_file ${sentinelCfg.age.secrets.loki-basic-auth-hashes.path};

            proxy_read_timeout 1800s;
            proxy_connect_timeout 1600s;

            access_log off;
          '';
        };
        locations."= /ready" = {
          proxyPass = "http://loki";
          extraConfig = ''
            auth_basic off;
            access_log off;
          '';
        };
      };
    };
  };

  environment.persistence."/persist".directories = [
    {
      directory = "/var/lib/loki";
      user = "loki";
      group = "loki";
      mode = "0700";
    }
  ];

  services.loki = let
    lokiDir = "/var/lib/loki";
  in {
    enable = true;
    configuration = {
      analytics.reporting_enabled = false;
      auth_enabled = false;

      server = {
        http_listen_address = "0.0.0.0";
        http_listen_port = 3100;
        log_level = "warn";
      };

      ingester = {
        lifecycler = {
          address = "127.0.0.1";
          ring = {
            kvstore.store = "inmemory";
            replication_factor = 1;
          };
          final_sleep = "0s";
        };
        chunk_idle_period = "5m";
        chunk_retain_period = "30s";
      };

      schema_config.configs = [
        {
          from = "2023-06-01";
          store = "tsdb";
          object_store = "filesystem";
          schema = "v12";
          index = {
            prefix = "index_";
            period = "24h";
          };
        }
      ];

      storage_config = {
        tsdb_shipper = {
          active_index_directory = "${lokiDir}/tsdb-index";
          cache_location = "${lokiDir}/tsdb-cache";
          cache_ttl = "24h";
          shared_store = "filesystem";
        };
        filesystem.directory = "${lokiDir}/chunks";
      };

      # Do not accept new logs that are ingressed when they are actually already old.
      limits_config = {
        reject_old_samples = true;
        reject_old_samples_max_age = "168h";
      };

      # Do not delete old logs automatically
      table_manager = {
        retention_deletes_enabled = false;
        retention_period = "0s";
      };

      compactor = {
        working_directory = lokiDir;
        shared_store = "filesystem";
        compactor_ring.kvstore.store = "inmemory";
      };
    };
  };

  systemd.services.loki.serviceConfig.RestartSec = "600"; # Retry every 10 minutes
}
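The loki-basic-auth-hashes secret above is the server half of a credential pair; each node's promtail pushes through the nginx basic-auth gate (cf. the meta.promtail hunk at the top of this diff). A minimal sketch of the client side using the upstream services.promtail module, with an illustrative URL and secret name rather than this repo's actual wiring:

    # Sketch: promtail pushing to a basic-auth-protected Loki endpoint.
    # The URL and secret name are illustrative, not taken from this commit.
    { config, ... }: {
      services.promtail.configuration.clients = [
        {
          url = "https://loki.example.com/loki/api/v1/push";
          basic_auth = {
            username = "mynode+loki-basic-auth-password";
            password_file = config.age.secrets.loki-basic-auth-password.path;
          };
        }
      ];
    }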
@@ -1,77 +0,0 @@
{
  config,
  nodes,
  ...
}: let
  sentinelCfg = nodes.sentinel.config;
  paperlessDomain = "paperless.${sentinelCfg.repo.secrets.local.personalDomain}";
in {
  # XXX: remove microvm.mem = 1024 * 12;
  # XXX: remove microvm.vcpu = 4;

  meta.wireguard-proxy.sentinel.allowedTCPPorts = [
    config.services.paperless.port
  ];

  age.secrets.paperless-admin-password = {
    rekeyFile = config.node.secretsDir + "/paperless-admin-password.age";
    generator.script = "alnum";
    mode = "440";
    group = "paperless";
  };

  nodes.sentinel = {
    networking.providedDomains.paperless = paperlessDomain;

    services.nginx = {
      upstreams.paperless = {
        servers."${config.meta.wireguard.proxy-sentinel.ipv4}:${toString config.services.paperless.port}" = {};
        extraConfig = ''
          zone paperless 64k;
          keepalive 2;
        '';
      };
      virtualHosts.${paperlessDomain} = {
        forceSSL = true;
        useACMEWildcardHost = true;
        extraConfig = ''
          client_max_body_size 512M;
        '';
        locations."/" = {
          proxyPass = "http://paperless";
          proxyWebsockets = true;
          X-Frame-Options = "SAMEORIGIN";
        };
      };
    };
  };

  # TODO environment.persistence."/persist".directories = [
  # TODO   {
  # TODO     directory = "/var/lib/???";
  # TODO     user = "???";
  # TODO     group = "???";
  # TODO     mode = "0700";
  # TODO   }
  # TODO ];

  services.paperless = {
    enable = true;
    address = "0.0.0.0";
    passwordFile = config.age.secrets.paperless-admin-password.path;
    extraConfig = {
      PAPERLESS_URL = "https://${paperlessDomain}";
      PAPERLESS_CONSUMER_ENABLE_BARCODES = true;
      PAPERLESS_CONSUMER_ENABLE_ASN_BARCODE = true;
      PAPERLESS_CONSUMER_BARCODE_SCANNER = "ZXING";
      PAPERLESS_FILENAME_FORMAT = "{created_year}-{created_month}-{created_day}_{asn}_{title}";
      #PAPERLESS_IGNORE_DATES = concatStringsSep "," ignoreDates;
      PAPERLESS_NUMBER_OF_SUGGESTED_DATES = 4;
      PAPERLESS_OCR_LANGUAGE = "deu+eng";
      PAPERLESS_TASK_WORKERS = 4;
      PAPERLESS_WEBSERVER_WORKERS = 4;
    };
  };

  systemd.services.paperless.serviceConfig.RestartSec = "600"; # Retry every 10 minutes
}
@@ -1,123 +0,0 @@
{
  config,
  lib,
  ...
}: let
  smbUsers = config.repo.secrets.local.samba.users;
  smbGroups = config.repo.secrets.local.samba.groups;
in {
  age.secrets."samba-passdb.tdb" = {
    rekeyFile = config.node.secretsDir + "/samba-passdb.tdb.age";
    mode = "600";
  };

  services.samba = {
    enable = true;
    openFirewall = true;

    # Disable Samba's nmbd, because we don't want to reply to NetBIOS over IP
    # requests, since all of our clients hardcode the server shares.
    enableNmbd = false;
    # Disable Samba's winbindd, which provides a number of services to the Name
    # Service Switch capability found in most modern C libraries, to arbitrary
    # applications via PAM and ntlm_auth and to Samba itself.
    enableWinbindd = false;
    extraConfig = lib.concatLines [
      # Show the server host name in the printer comment box in print manager
      # and next to the IPC connection in net view.
      "server string = my-nas"
      # Set the NetBIOS name by which the Samba server is known.
      "netbios name = my-nas"
      # Disable netbios support. We don't need to support browsing since all
      # clients hardcode the host and share names.
      "disable netbios = yes"
      # Deny access to all hosts by default.
      "hosts deny = 0.0.0.0/0"
      # Allow access to local network and TODO: wireguard
      "hosts allow = 192.168.1.0/22 192.168.100.0/24"

      # Set sane logging options
      "log level = 0 auth:2 passdb:2"
      "log file = /dev/null"
      "max log size = 0"
      "logging = systemd"

      # TODO: allow based on wireguard ip without username and password
      # Users always have to login with an account and are never mapped
      # to a guest account.
      "passdb backend = tdbsam:${config.age.secrets."samba-passdb.tdb".path}"
      "server role = standalone"
      "guest account = nobody"
      "map to guest = never"

      # Clients should only connect using the latest SMB3 protocol (e.g., on
      # clients running Windows 8 and later).
      "server min protocol = SMB3_11"
      # Require native SMB transport encryption by default.
      "server smb encrypt = required"

      # Disable printer sharing. By default Samba shares printers configured
      # using CUPS.
      "load printers = no"
      "printing = bsd"
      "printcap name = /dev/null"
      "disable spoolss = yes"
      "show add printer wizard = no"

      # Load in modules (order is critical!) and enable AAPL extensions.
      "vfs objects = catia fruit streams_xattr"
      # Enable Apple's SMB2+ extension.
      "fruit:aapl = yes"
      # Clean up unused or empty files created by the OS or Samba.
      "fruit:wipe_intentionally_left_blank_rfork = yes"
      "fruit:delete_empty_adfiles = yes"
    ];
    shares = let
      mkShare = path: cfg:
        {
          inherit path;
          public = "no";
          writable = "yes";
          "create mask" = "0770";
          "directory mask" = "0770";
          # "force create mode" = "0660";
          # "force directory mode" = "0770";
          #"acl allow execute always" = "yes";
        }
        // cfg;

      mkGroupShare = group:
        mkShare "/shares/groups/${group}" {
          "valid users" = "@${group}";
          "force user" = "family";
          "force group" = group;
        };

      mkUserShare = user:
        mkShare "/shares/users/${user}" {
          "valid users" = user;
        };
    in
      {}
      // lib.mapAttrs (name: _: mkUserShare name) smbUsers
      // lib.mapAttrs (name: _: mkGroupShare name) smbGroups;
  };

  users.users = let
    mkUser = name: id: groups: {
      isNormalUser = true;
      uid = id;
      group = name;
      extraGroups = groups;
      createHome = false;
      home = "/var/empty";
      useDefaultShell = false;
      autoSubUidGidRange = false;
    };
  in
    {}
    // lib.mapAttrs (name: cfg: mkUser name cfg.id cfg.groups) smbUsers
    // lib.mapAttrs (name: cfg: mkUser name cfg.id []) smbGroups;

  users.groups = lib.mapAttrs (_: cfg: {gid = cfg.id;}) (smbUsers // smbGroups);
}
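The share generation at the end of the deleted samba module is the same merge idiom once more: lib.mapAttrs turns every user and group from the secrets file into one share definition. Reduced to its core with illustrative data (builtins.mapAttrs behaves like lib.mapAttrs here, so the sketch needs no nixpkgs):

    # Sketch: one share attrset per user, keyed by user name.
    let
      smbUsers = {alice = {}; bob = {};}; # illustrative; really read from secrets
      mkUserShare = user: {
        path = "/shares/users/${user}";
        "valid users" = user;
      };
    in
      builtins.mapAttrs (name: _: mkUserShare name) smbUsers
    # => { alice = { path = "/shares/users/alice"; ... }; bob = ...; }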
@@ -4,7 +4,7 @@
   ...
 }: let
   inherit (lib) net;
-  lanCidrv4 = "192.168.100.0/24";
+  lanCidrv4 = "192.168.1.0/24";
   dnsIp = net.cidr.host 2 lanCidrv4;
 in {
   # TODO make meta.kea module?
@@ -3,7 +3,7 @@
   lib,
   ...
 }: let
-  lanCidrv4 = "192.168.100.0/24";
+  lanCidrv4 = "192.168.1.0/24";
   lanCidrv6 = "fd10::/64";
 in {
   networking.hostId = config.repo.secrets.local.networking.hostId;
@@ -44,13 +44,8 @@ in {
   #dhcpV4Config.UseDNS = false;
   #dhcpV6Config.UseDNS = false;
   #ipv6AcceptRAConfig.UseDNS = false;
-  address = [
-    "192.168.178.7/24"
-    #"fdee::1/64"
-  ];
-  gateway = [
-    "192.168.178.1"
-  ];
+  address = ["192.168.178.2/24"];
+  gateway = ["192.168.178.1"];
   matchConfig.MACAddress = config.repo.secrets.local.networking.interfaces.wan.mac;
   networkConfig.IPv6PrivacyExtensions = "yes";
   linkConfig.RequiredForOnline = "routable";
@@ -1,10 +0,0 @@
age-encryption.org/v1
-> X25519 S365Ptmx5jGBBvN7q/nxHZWLT4wsHYey5TSIvqfKqXs
MODSBeb8Kt0CfFdTgPskMFVaen28O5N5ql7aqxJ+YaQ
-> piv-p256 xqSe8Q A8G1Ljc2V/ay90ZiITuXGDxRaH5R/QqDsSpXbsYQFFjx
nE6ODZqg4QAujfWOeTRD/S0m/8bRadTqSCQa5sVIJ3w
-> <*^9;-grease X4qEn "qK,G4} 5Gp'jn!Q
bU3aA07kpeHbqAoFMrp4mWj3/iPH67VZpE+mW2Z9huxze+Jn1js0p/hV2fj2jlWm
/DZP
--- vSYl/yA0H1WBqkDI+lu8o1+/l7pOt5wFwb2cLuCDWFQ
[binary age ciphertext payload, not representable as text]
@@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINIoRbU/YRiGKC0mXggTEvjxzR9Wno3Z4dLL0rzryvOn

@@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOanBR6abVoPfpH9RyhxaJ1dg0/+VFAUyCfQzFqxGBzk

@@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOeNwlldS5+lZvIgBpeoTOKj26zzG+LeSZYEvJzbCBHY

@@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM7FT4AvwoDetiXFiYA0l/B03ThuNrOnPsCxDdpzLXXZ
@@ -1,9 +0,0 @@
age-encryption.org/v1
-> X25519 T+p8DC+r5eXbafinXz0AuqaDgyTXzVEk75YCzbzPORg
AocHJ7AtX2NWN7PeLjc6tbaYKW6p793vajC+eBAtA2k
-> piv-p256 xqSe8Q A5oLMFDESd7+zHU0i/DXaiFC/G8OWgW2y8boYRR5NUQ1
qcIQJlkPhS/ARwzV6ajvnefELmxI4/a6kXnJyjryq5I
-> +8Z-grease o*-Th)vX %TAq
nQRpWbLvit6lC0NV/sZk
--- p4feRTSXzE66RtPi9F/vxSxJv1tlcnYa7OFnt0FyDeI
[binary age ciphertext payload, not representable as text]
@@ -1 +0,0 @@
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIMV+QsCngZ1k6Ta5pqz2wRHsiSlEwlfl7pgSRfHzF5Y
@@ -1,10 +0,0 @@
age-encryption.org/v1
-> X25519 rGyfugBW1UJ6ufBn8FUWby1AG3ZnBDnNXMBGEXOi/GM
I87QSk3ZBL4FZjdwFd7RS2aRNizPRn/gAdQEUDrnTak
-> piv-p256 xqSe8Q ArQj/8FR6hO8vrqY+1e/YN+h46hSCMg0c3tqZ6U3ApMS
+XzFGrEz4z2tU6N7b2taf6j8V4WJi4NfQq4IJHV53l0
-> #=2[OV-grease cKs OHnI
iLqxxLbFIrTYFSDGKsOtZ8j7nw
--- Uu8dPdMbw1Zvs8ZuzNbm/LBoeexh3sEiXht6IrkYf8A
[binary age ciphertext payload, not representable as text]
Binary file not shown.
Binary file not shown.