forked from mirrors_public/oddlama_nix-config

feat(sire): init host: main media storage server

parent 36cb1d31cb
commit 1a96a4b8df

33 changed files with 364 additions and 69 deletions
hosts/sire/guests/common.nix (new file)
@@ -0,0 +1,27 @@
{
  config,
  lib,
  nodes,
  ...
}: let
  sentinelCfg = nodes.sentinel.config;
in {
  meta.wireguard-proxy.sentinel = {};
  meta.promtail = {
    enable = true;
    proxy = "sentinel";
  };

  # Connect safely via wireguard to skip http authentication
  networking.hosts.${sentinelCfg.meta.wireguard.proxy-sentinel.ipv4} = [sentinelCfg.networking.providedDomains.influxdb];
  meta.telegraf = lib.mkIf (!config.boot.isContainer) {
    enable = true;
    scrapeSensors = false;
    influxdb2 = {
      domain = sentinelCfg.networking.providedDomains.influxdb;
      organization = "machines";
      bucket = "telegraf";
      node = "ward-influxdb";
    };
  };
}
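The meta.telegraf options above are repo-specific glue. For orientation, a plain-NixOS sketch of the output it presumably provisions could look roughly like this; the URL, environment file path, and token wiring are assumptions, not taken from this commit:

{
  services.telegraf = {
    enable = true;
    # Token is injected via an environment file instead of the Nix store
    # (hypothetical path, e.g. decrypted by agenix at activation time).
    environmentFiles = ["/run/agenix/telegraf-influxdb-token.env"];
    extraConfig.outputs.influxdb_v2 = [{
      # The real deployment resolves this via providedDomains.influxdb.
      urls = ["https://influxdb.example.com"];
      token = "$INFLUX_TOKEN";
      organization = "machines";
      bucket = "telegraf";
    }];
  };
}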
hosts/sire/guests/grafana.nix (new file)
@@ -0,0 +1,161 @@
{
  config,
  nodes,
  ...
}: let
  sentinelCfg = nodes.sentinel.config;
  grafanaDomain = "grafana.${sentinelCfg.repo.secrets.local.personalDomain}";
in {
  meta.wireguard-proxy.sentinel.allowedTCPPorts = [config.services.grafana.settings.server.http_port];

  age.secrets.grafana-secret-key = {
    rekeyFile = config.node.secretsDir + "/grafana-secret-key.age";
    mode = "440";
    group = "grafana";
  };

  age.secrets.grafana-loki-basic-auth-password = {
    generator.script = "alnum";
    mode = "440";
    group = "grafana";
  };

  age.secrets.grafana-influxdb-token = {
    generator.script = "alnum";
    generator.tags = ["influxdb"];
    mode = "440";
    group = "grafana";
  };

  # Mirror the original oauth2 secret
  age.secrets.grafana-oauth2-client-secret = {
    inherit (nodes.ward-kanidm.config.age.secrets.kanidm-oauth2-grafana) rekeyFile;
    mode = "440";
    group = "grafana";
  };

  nodes.ward-influxdb = {
    # Mirror the original secret on the influx host
    age.secrets."grafana-influxdb-token-${config.node.name}" = {
      inherit (config.age.secrets.grafana-influxdb-token) rekeyFile;
      mode = "440";
      group = "influxdb2";
    };

    services.influxdb2.provision.organizations.machines.auths."grafana machines:telegraf (${config.node.name})" = {
      readBuckets = ["telegraf"];
      writeBuckets = ["telegraf"];
      tokenFile = nodes.ward-influxdb.config.age.secrets."grafana-influxdb-token-${config.node.name}".path;
    };
  };

  nodes.sentinel = {
    age.secrets.loki-basic-auth-hashes.generator.dependencies = [
      config.age.secrets.grafana-loki-basic-auth-password
    ];

    networking.providedDomains.grafana = grafanaDomain;

    services.nginx = {
      upstreams.grafana = {
        servers."${config.meta.wireguard.proxy-sentinel.ipv4}:${toString config.services.grafana.settings.server.http_port}" = {};
        extraConfig = ''
          zone grafana 64k;
          keepalive 2;
        '';
      };
      virtualHosts.${grafanaDomain} = {
        forceSSL = true;
        useACMEWildcardHost = true;
        locations."/" = {
          proxyPass = "http://grafana";
          proxyWebsockets = true;
        };
      };
    };
  };

  environment.persistence."/persist".directories = [
    {
      directory = config.services.grafana.dataDir;
      user = "grafana";
      group = "grafana";
      mode = "0700";
    }
  ];

  services.grafana = {
    enable = true;
    settings = {
      analytics.reporting_enabled = false;
      users.allow_sign_up = false;

      server = {
        domain = grafanaDomain;
        root_url = "https://${grafanaDomain}";
        enforce_domain = true;
        enable_gzip = true;
        http_addr = "0.0.0.0";
        http_port = 3001;
      };

      security = {
        disable_initial_admin_creation = true;
        secret_key = "$__file{${config.age.secrets.grafana-secret-key.path}}";
        cookie_secure = true;
        disable_gravatar = true;
        hide_version = true;
      };

      auth.disable_login_form = true;
      "auth.generic_oauth" = {
        enabled = true;
        name = "Kanidm";
        icon = "signin";
        allow_sign_up = true;
        #auto_login = true;
        client_id = "grafana";
        client_secret = "$__file{${config.age.secrets.grafana-oauth2-client-secret.path}}";
        scopes = "openid email profile";
        login_attribute_path = "preferred_username";
        auth_url = "https://${sentinelCfg.networking.providedDomains.kanidm}/ui/oauth2";
        token_url = "https://${sentinelCfg.networking.providedDomains.kanidm}/oauth2/token";
        api_url = "https://${sentinelCfg.networking.providedDomains.kanidm}/oauth2/openid/grafana/userinfo";
        use_pkce = true;
        # Allow mapping oauth2 roles to server admin
        allow_assign_grafana_admin = true;
        role_attribute_path = "contains(scopes[*], 'server_admin') && 'GrafanaAdmin' || contains(scopes[*], 'admin') && 'Admin' || contains(scopes[*], 'editor') && 'Editor' || 'Viewer'";
      };
    };

    provision = {
      enable = true;
      datasources.settings.datasources = [
        {
          name = "InfluxDB (machines)";
          type = "influxdb";
          access = "proxy";
          url = "https://${sentinelCfg.networking.providedDomains.influxdb}";
          orgId = 1;
          secureJsonData.token = "$__file{${config.age.secrets.grafana-influxdb-token.path}}";
          jsonData.version = "Flux";
          jsonData.organization = "machines";
          jsonData.defaultBucket = "telegraf";
        }
        # TODO duplicate above influxdb source (with scoped read tokens??) for each organization
        {
          name = "Loki";
          type = "loki";
          access = "proxy";
          url = "https://${sentinelCfg.networking.providedDomains.loki}";
          orgId = 1;
          basicAuth = true;
          basicAuthUser = "${config.node.name}+grafana-loki-basic-auth-password";
          secureJsonData.basicAuthPassword = "$__file{${config.age.secrets.grafana-loki-basic-auth-password.path}}";
        }
      ];
    };
  };

  systemd.services.grafana.serviceConfig.RestartSec = "600"; # Retry every 10 minutes
}
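The role_attribute_path above is a JMESPath expression that Grafana evaluates against the userinfo response. A worked example with a hypothetical payload (not part of this commit):

# userinfo (hypothetical): { "scopes": ["openid", "email", "editor"] }
# contains(scopes[*], 'server_admin')  -> false
# contains(scopes[*], 'admin')         -> false
# contains(scopes[*], 'editor')        -> true   => role "Editor"
# a user matching none of the scopes   ->           role "Viewer"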
hosts/sire/guests/immich.nix (new file)
@@ -0,0 +1,40 @@
{
  config,
  nodes,
  ...
}: let
  sentinelCfg = nodes.sentinel.config;
  immichDomain = "immich.${sentinelCfg.repo.secrets.local.personalDomain}";
in {
  meta.wireguard-proxy.sentinel.allowedTCPPorts = [config.services.immich.web_port];

  nodes.sentinel = {
    networking.providedDomains.immich = immichDomain;

    services.nginx = {
      upstreams.immich = {
        servers."${config.meta.wireguard.proxy-sentinel.ipv4}:${toString config.services.immich.settings.bind_port}" = {};
        extraConfig = ''
          zone immich 64k;
          keepalive 2;
        '';
      };
      virtualHosts.${immichDomain} = {
        forceSSL = true;
        useACMEWildcardHost = true;
        oauth2.enable = true;
        oauth2.allowedGroups = ["access_immich"];
        locations."/" = {
          proxyPass = "http://immich";
          proxyWebsockets = true;
        };
      };
    };
  };

  services.immich = {
    enable = true;
  };

  systemd.services.immich.serviceConfig.RestartSec = "600"; # Retry every 10 minutes
}
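The oauth2.enable and oauth2.allowedGroups options appear to come from the repo's own nginx oauth2 integration: presumably only members of the Kanidm group access_immich reach the upstream. As a sketch, granting access would then be a group membership change on the Kanidm side (the CLI invocation is an assumption, not shown in this commit):

# Hypothetical: grant a user access to immich via Kanidm group membership:
#   kanidm group add-members access_immich <user>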
hosts/sire/guests/influxdb.nix (new file)
@@ -0,0 +1,101 @@
{
  config,
  lib,
  nodes,
  pkgs,
  ...
}: let
  sentinelCfg = nodes.sentinel.config;
  influxdbDomain = "influxdb.${sentinelCfg.repo.secrets.local.personalDomain}";
  influxdbPort = 8086;
in {
  meta.wireguard-proxy.sentinel.allowedTCPPorts = [influxdbPort];

  nodes.sentinel = {
    networking.providedDomains.influxdb = influxdbDomain;

    services.nginx = {
      upstreams.influxdb = {
        servers."${config.meta.wireguard.proxy-sentinel.ipv4}:${toString influxdbPort}" = {};
        extraConfig = ''
          zone influxdb 64k;
          keepalive 2;
        '';
      };
      virtualHosts.${influxdbDomain} = let
        accessRules = ''
          satisfy any;
          ${lib.concatMapStrings (ip: "allow ${ip};\n") sentinelCfg.meta.wireguard.proxy-sentinel.server.reservedAddresses}
          deny all;
        '';
      in {
        forceSSL = true;
        useACMEWildcardHost = true;
        oauth2.enable = true;
        oauth2.allowedGroups = ["access_influxdb"];
        locations."/" = {
          proxyPass = "http://influxdb";
          proxyWebsockets = true;
          extraConfig = accessRules;
        };
        locations."/api/v2/write" = {
          proxyPass = "http://influxdb/api/v2/write";
          proxyWebsockets = true;
          extraConfig = ''
            ${accessRules}
            access_log off;
          '';
        };
      };
    };
  };

  age.secrets.influxdb-admin-password = {
    generator.script = "alnum";
    mode = "440";
    group = "influxdb2";
  };

  age.secrets.influxdb-admin-token = {
    generator.script = "alnum";
    mode = "440";
    group = "influxdb2";
  };

  age.secrets.influxdb-user-telegraf-token = {
    generator.script = "alnum";
    mode = "440";
    group = "influxdb2";
  };

  environment.persistence."/persist".directories = [
    {
      directory = "/var/lib/influxdb2";
      user = "influxdb2";
      group = "influxdb2";
      mode = "0700";
    }
  ];

  services.influxdb2 = {
    enable = true;
    settings = {
      reporting-disabled = true;
      http-bind-address = "0.0.0.0:${toString influxdbPort}";
    };
    provision = {
      enable = true;
      initialSetup = {
        organization = "default";
        bucket = "default";
        passwordFile = config.age.secrets.influxdb-admin-password.path;
        tokenFile = config.age.secrets.influxdb-admin-token.path;
      };
      organizations.machines.buckets.telegraf = {};
    };
  };

  environment.systemPackages = [pkgs.influxdb2-cli];

  systemd.services.influxdb2.serviceConfig.RestartSec = "600"; # Retry every 10 minutes
}
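Other consumers can be provisioned tokens the same way grafana.nix does above. A minimal sketch of an additional read-only auth, following the provision API already used in this commit (consumer name and secret are hypothetical):

services.influxdb2.provision.organizations.machines.auths."reporting machines:telegraf (readonly)" = {
  # Omitting writeBuckets is assumed to yield a token that can only read.
  readBuckets = ["telegraf"];
  tokenFile = config.age.secrets.reporting-influxdb-token.path; # hypothetical secret
};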
hosts/sire/guests/loki.nix (new file)
@@ -0,0 +1,135 @@
{
  config,
  nodes,
  ...
}: let
  sentinelCfg = nodes.sentinel.config;
  lokiDomain = "loki.${sentinelCfg.repo.secrets.local.personalDomain}";
in {
  meta.wireguard-proxy.sentinel.allowedTCPPorts = [config.services.loki.configuration.server.http_listen_port];

  nodes.sentinel = {
    networking.providedDomains.loki = lokiDomain;

    age.secrets.loki-basic-auth-hashes = {
      generator.script = "basic-auth";
      mode = "440";
      group = "nginx";
    };

    services.nginx = {
      upstreams.loki = {
        servers."${config.meta.wireguard.proxy-sentinel.ipv4}:${toString config.services.loki.configuration.server.http_listen_port}" = {};
        extraConfig = ''
          zone loki 64k;
          keepalive 2;
        '';
      };
      virtualHosts.${lokiDomain} = {
        forceSSL = true;
        useACMEWildcardHost = true;
        locations."/" = {
          proxyPass = "http://loki";
          proxyWebsockets = true;
          extraConfig = ''
            auth_basic "Authentication required";
            auth_basic_user_file ${sentinelCfg.age.secrets.loki-basic-auth-hashes.path};

            proxy_read_timeout 1800s;
            proxy_connect_timeout 1600s;

            access_log off;
          '';
        };
        locations."= /ready" = {
          proxyPass = "http://loki";
          extraConfig = ''
            auth_basic off;
            access_log off;
          '';
        };
      };
    };
  };

  environment.persistence."/persist".directories = [
    {
      directory = "/var/lib/loki";
      user = "loki";
      group = "loki";
      mode = "0700";
    }
  ];

  services.loki = let
    lokiDir = "/var/lib/loki";
  in {
    enable = true;
    configuration = {
      analytics.reporting_enabled = false;
      auth_enabled = false;

      server = {
        http_listen_address = "0.0.0.0";
        http_listen_port = 3100;
        log_level = "warn";
      };

      ingester = {
        lifecycler = {
          address = "127.0.0.1";
          ring = {
            kvstore.store = "inmemory";
            replication_factor = 1;
          };
          final_sleep = "0s";
        };
        chunk_idle_period = "5m";
        chunk_retain_period = "30s";
      };

      schema_config.configs = [
        {
          from = "2023-06-01";
          store = "tsdb";
          object_store = "filesystem";
          schema = "v12";
          index = {
            prefix = "index_";
            period = "24h";
          };
        }
      ];

      storage_config = {
        tsdb_shipper = {
          active_index_directory = "${lokiDir}/tsdb-index";
          cache_location = "${lokiDir}/tsdb-cache";
          cache_ttl = "24h";
          shared_store = "filesystem";
        };
        filesystem.directory = "${lokiDir}/chunks";
      };

      # Reject incoming logs that are already old by the time they are ingested.
      limits_config = {
        reject_old_samples = true;
        reject_old_samples_max_age = "168h";
      };

      # Do not delete old logs automatically
      table_manager = {
        retention_deletes_enabled = false;
        retention_period = "0s";
      };

      compactor = {
        working_directory = lokiDir;
        shared_store = "filesystem";
        compactor_ring.kvstore.store = "inmemory";
      };
    };
  };

  systemd.services.loki.serviceConfig.RestartSec = "600"; # Retry every 10 minutes
}
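Clients only reach this Loki through the sentinel proxy with basic auth. For orientation, the promtail client stanza that meta.promtail (see common.nix) presumably generates would look roughly like this on plain NixOS; the domain, username, and secret path are assumptions:

services.promtail.configuration.clients = [{
  # The real deployment resolves this via providedDomains.loki.
  url = "https://loki.example.com/loki/api/v1/push";
  basic_auth = {
    username = "somehost+promtail-loki-basic-auth-password"; # hypothetical
    password_file = "/run/agenix/promtail-loki-basic-auth-password"; # hypothetical
  };
}];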
hosts/sire/guests/paperless.nix (new file)
@@ -0,0 +1,77 @@
{
  config,
  nodes,
  ...
}: let
  sentinelCfg = nodes.sentinel.config;
  paperlessDomain = "paperless.${sentinelCfg.repo.secrets.local.personalDomain}";
in {
  # XXX: remove microvm.mem = 1024 * 12;
  # XXX: remove microvm.vcpu = 4;

  meta.wireguard-proxy.sentinel.allowedTCPPorts = [
    config.services.paperless.port
  ];

  age.secrets.paperless-admin-password = {
    rekeyFile = config.node.secretsDir + "/paperless-admin-password.age";
    generator.script = "alnum";
    mode = "440";
    group = "paperless";
  };

  nodes.sentinel = {
    networking.providedDomains.paperless = paperlessDomain;

    services.nginx = {
      upstreams.paperless = {
        servers."${config.meta.wireguard.proxy-sentinel.ipv4}:${toString config.services.paperless.port}" = {};
        extraConfig = ''
          zone paperless 64k;
          keepalive 2;
        '';
      };
      virtualHosts.${paperlessDomain} = {
        forceSSL = true;
        useACMEWildcardHost = true;
        extraConfig = ''
          client_max_body_size 512M;
        '';
        locations."/" = {
          proxyPass = "http://paperless";
          proxyWebsockets = true;
          extraConfig = ''
            add_header X-Frame-Options "SAMEORIGIN";
          '';
        };
      };
    };
  };

  # TODO environment.persistence."/persist".directories = [
  # TODO   {
  # TODO     directory = "/var/lib/???";
  # TODO     user = "???";
  # TODO     group = "???";
  # TODO     mode = "0700";
  # TODO   }
  # TODO ];

  services.paperless = {
    enable = true;
    address = "0.0.0.0";
    passwordFile = config.age.secrets.paperless-admin-password.path;
    extraConfig = {
      PAPERLESS_URL = "https://${paperlessDomain}";
      PAPERLESS_CONSUMER_ENABLE_BARCODES = true;
      PAPERLESS_CONSUMER_ENABLE_ASN_BARCODE = true;
      PAPERLESS_CONSUMER_BARCODE_SCANNER = "ZXING";
      PAPERLESS_FILENAME_FORMAT = "{created_year}-{created_month}-{created_day}_{asn}_{title}";
      #PAPERLESS_IGNORE_DATES = concatStringsSep "," ignoreDates;
      PAPERLESS_NUMBER_OF_SUGGESTED_DATES = 4;
      PAPERLESS_OCR_LANGUAGE = "deu+eng";
      PAPERLESS_TASK_WORKERS = 4;
      PAPERLESS_WEBSERVER_WORKERS = 4;
    };
  };

  systemd.services.paperless.serviceConfig.RestartSec = "600"; # Retry every 10 minutes
}
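A worked example of the PAPERLESS_FILENAME_FORMAT above (values hypothetical):

# A document created on 2023-06-01 with ASN 42 and title "Invoice"
# would be stored as: 2023-06-01_42_Invoice.pdf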
hosts/sire/guests/samba.nix (new file)
@@ -0,0 +1,123 @@
{
  config,
  lib,
  ...
}: let
  smbUsers = config.repo.secrets.local.samba.users;
  smbGroups = config.repo.secrets.local.samba.groups;
in {
  age.secrets."samba-passdb.tdb" = {
    rekeyFile = config.node.secretsDir + "/samba-passdb.tdb.age";
    mode = "600";
  };

  services.samba = {
    enable = true;
    openFirewall = true;

    # Disable Samba's nmbd, because we don't want to reply to NetBIOS over IP
    # requests, since all of our clients hardcode the server shares.
    enableNmbd = false;
    # Disable Samba's winbindd, which provides a number of services to the Name
    # Service Switch capability found in most modern C libraries, to arbitrary
    # applications via PAM and ntlm_auth and to Samba itself.
    enableWinbindd = false;
    extraConfig = lib.concatLines [
      # Show the server host name in the printer comment box in print manager
      # and next to the IPC connection in net view.
      "server string = my-nas"
      # Set the NetBIOS name by which the Samba server is known.
      "netbios name = my-nas"
      # Disable netbios support. We don't need to support browsing since all
      # clients hardcode the host and share names.
      "disable netbios = yes"
      # Deny access to all hosts by default.
      "hosts deny = 0.0.0.0/0"
      # Allow access to local network and TODO: wireguard
      "hosts allow = 192.168.1.0/22 192.168.100.0/24"

      # Set sane logging options
      "log level = 0 auth:2 passdb:2"
      "log file = /dev/null"
      "max log size = 0"
      "logging = systemd"

      # TODO: allow based on wireguard ip without username and password
      # Users always have to login with an account and are never mapped
      # to a guest account.
      "passdb backend = tdbsam:${config.age.secrets."samba-passdb.tdb".path}"
      "server role = standalone"
      "guest account = nobody"
      "map to guest = never"

      # Clients should only connect using the latest SMB3 protocol (e.g., on
      # clients running Windows 8 and later).
      "server min protocol = SMB3_11"
      # Require native SMB transport encryption by default.
      "server smb encrypt = required"

      # Disable printer sharing. By default Samba shares printers configured
      # using CUPS.
      "load printers = no"
      "printing = bsd"
      "printcap name = /dev/null"
      "disable spoolss = yes"
      "show add printer wizard = no"

      # Load in modules (order is critical!) and enable AAPL extensions.
      "vfs objects = catia fruit streams_xattr"
      # Enable Apple's SMB2+ extension.
      "fruit:aapl = yes"
      # Clean up unused or empty files created by the OS or Samba.
      "fruit:wipe_intentionally_left_blank_rfork = yes"
      "fruit:delete_empty_adfiles = yes"
    ];
    shares = let
      mkShare = path: cfg:
        {
          inherit path;
          public = "no";
          writable = "yes";
          "create mask" = "0770";
          "directory mask" = "0770";
          # "force create mode" = "0660";
          # "force directory mode" = "0770";
          #"acl allow execute always" = "yes";
        }
        // cfg;

      mkGroupShare = group:
        mkShare "/shares/groups/${group}" {
          "valid users" = "@${group}";
          "force user" = "family";
          "force group" = group;
        };

      mkUserShare = user:
        mkShare "/shares/users/${user}" {
          "valid users" = user;
        };
    in
      {}
      // lib.mapAttrs (name: _: mkUserShare name) smbUsers
      // lib.mapAttrs (name: _: mkGroupShare name) smbGroups;
  };

  users.users = let
    mkUser = name: id: groups: {
      isNormalUser = true;
      uid = id;
      group = name;
      extraGroups = groups;
      createHome = false;
      home = "/var/empty";
      useDefaultShell = false;
      autoSubUidGidRange = false;
    };
  in
    {}
    // lib.mapAttrs (name: cfg: mkUser name cfg.id cfg.groups) smbUsers
    // lib.mapAttrs (name: cfg: mkUser name cfg.id []) smbGroups;

  users.groups = lib.mapAttrs (_: cfg: {gid = cfg.id;}) (smbUsers // smbGroups);
}
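For illustration, mkUserShare "alice" (user name hypothetical) evaluates to the following share attrset after the // cfg merge:

# Result of mkUserShare "alice" (hypothetical user):
{
  path = "/shares/users/alice";
  public = "no";
  writable = "yes";
  "create mask" = "0770";
  "directory mask" = "0770";
  "valid users" = "alice";
}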