forked from mirrors_public/oddlama_nix-config
wip: prepare testing caddy over nginx with oauth2-proxy
parent 7f2f93b640
commit c5a863ce51
7 changed files with 245 additions and 10 deletions
@@ -100,6 +100,14 @@
        mode = "0700";
      }
    ]
    ++ lib.optionals config.services.caddy.enable [
      {
        directory = config.services.caddy.dataDir;
        user = "caddy";
        group = "caddy";
        mode = "0700";
      }
    ]
    ++ lib.optionals config.services.loki.enable [
      {
        directory = "/var/lib/loki";
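
Note: for orientation only, a minimal sketch of the kind of list this hunk extends, assuming an impermanence-style persistence option. The option path and the first entry are illustrative assumptions, not taken from this repository:

  # Sketch under assumptions: conditionally persist service state directories.
  environment.persistence."/persist".directories =
    [
      {
        directory = "/var/lib/systemd"; # illustrative pre-existing entry
        mode = "0700";
      }
    ]
    ++ lib.optionals config.services.caddy.enable [
      {
        directory = config.services.caddy.dataDir;
        user = "caddy";
        group = "caddy";
        mode = "0700";
      }
    ];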
hosts/sentinel/caddy.nix (new file, 45 lines)
@@ -0,0 +1,45 @@
{
  config,
  lib,
  nodes,
  ...
}: let
  inherit (config.repo.secrets.local) acme personalDomain;
in {
  networking.domain = personalDomain;

  rekey.secrets.acme-credentials = {
    file = ./secrets/acme-credentials.age;
    mode = "440";
    group = "acme";
  };

  security.acme = {
    acceptTerms = true;
    defaults = {
      inherit (acme) email;
      credentialsFile = config.rekey.secrets.acme-credentials.path;
      dnsProvider = "cloudflare";
      dnsPropagationCheck = true;
      reloadServices = ["nginx"];
    };
  };
  extra.acme.wildcardDomains = acme.domains;
  users.groups.acme.members = ["nginx"];

  rekey.secrets."dhparams.pem" = {
    file = ./secrets/dhparams.pem.age;
    mode = "440";
    group = "nginx";
  };

  services.caddy = let
    authDomain = nodes.ward-nginx.config.services.kanidm.serverSettings.domain;
    authPort = lib.last (lib.splitString ":" nodes.ward-nginx.config.services.kanidm.serverSettings.bindaddress);
    grafanaDomain = nodes.ward-test.config.services.grafana.settings.server.domain;
    grafanaPort = toString nodes.ward-test.config.services.grafana.settings.server.http_port;
    lokiDomain = "loki.${personalDomain}";
    lokiPort = toString nodes.ward-loki.config.services.loki.settings.server.http_port;
  in {
  };
}
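
Note: the services.caddy attrset above is intentionally left empty in this wip commit. A minimal sketch of the direction it could take, reusing the same let bindings, offered purely as an assumption about where this is headed (stock NixOS Caddy options; certificate wiring omitted):

  services.caddy = let
    authDomain = nodes.ward-nginx.config.services.kanidm.serverSettings.domain;
    authPort = lib.last (lib.splitString ":" nodes.ward-nginx.config.services.kanidm.serverSettings.bindaddress);
    grafanaDomain = nodes.ward-test.config.services.grafana.settings.server.domain;
    grafanaPort = toString nodes.ward-test.config.services.grafana.settings.server.http_port;
    lokiDomain = "loki.${personalDomain}";
    lokiPort = toString nodes.ward-loki.config.services.loki.settings.server.http_port;
  in {
    enable = true;
    # Illustrative only: reverse proxy each public domain to its upstream over
    # the proxy-sentinel WireGuard link; not part of the actual commit.
    virtualHosts.${authDomain}.extraConfig = ''
      reverse_proxy ${nodes.ward-nginx.config.extra.wireguard.proxy-sentinel.ipv4}:${authPort}
    '';
    virtualHosts.${grafanaDomain}.extraConfig = ''
      reverse_proxy ${nodes.ward-test.config.extra.wireguard.proxy-sentinel.ipv4}:${grafanaPort}
    '';
    virtualHosts.${lokiDomain}.extraConfig = ''
      reverse_proxy ${nodes.ward-loki.config.extra.wireguard.proxy-sentinel.ipv4}:${lokiPort}
    '';
  };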
@@ -12,6 +12,7 @@

    ./fs.nix
    ./net.nix
    ./nginx.nix
    #./nginx.nix
    ./caddy.nix
  ];
}

@@ -38,8 +38,14 @@ in {
    authPort = lib.last (lib.splitString ":" nodes.ward-nginx.config.services.kanidm.serverSettings.bindaddress);
    grafanaDomain = nodes.ward-test.config.services.grafana.settings.server.domain;
    grafanaPort = toString nodes.ward-test.config.services.grafana.settings.server.http_port;
    lokiDomain = "loki.${personalDomain}";
    lokiPort = toString nodes.ward-loki.config.services.loki.settings.server.http_port;
  in {
    enable = true;

    # TODO move subconfigs to the relevant hosts instead.
    # -> have something like merged config nodes.<name>....

    upstreams.kanidm = {
      servers."${nodes.ward-nginx.config.extra.wireguard.proxy-sentinel.ipv4}:${authPort}" = {};
      extraConfig = ''

@@ -71,5 +77,32 @@ in {
      useACMEHost = config.lib.extra.matchingWildcardCert grafanaDomain;
      locations."/".proxyPass = "http://grafana";
    };

    upstreams.loki = {
      servers."${nodes.ward-loki.config.extra.wireguard.proxy-sentinel.ipv4}:${lokiPort}" = {};
      extraConfig = ''
        zone loki 64k;
        keepalive 2;
      '';
    };
    virtualHosts.${lokiDomain} = {
      forceSSL = true;
      useACMEHost = config.lib.extra.matchingWildcardCert lokiDomain;
      locations."/" = {
        proxyPass = "http://loki";
        proxyWebsockets = true;
        extraConfig = ''
          access_log off;
        '';
      };
      locations."/ready" = {
        proxyPass = "http://loki";
        proxyWebsockets = true;
        extraConfig = ''
          auth_request off;
          access_log off;
        '';
      };
    };
  };
}

hosts/sentinel/oauth2-proxy.nix (new file, 57 lines)
@@ -0,0 +1,57 @@
{
  config,
  lib,
  nodes,
  ...
}: let
  inherit (config.repo.secrets.local) acme personalDomain;
  authDomain = "auth.${personalDomain}";
in {
  networking.domain = personalDomain;

  # Contains OAUTH2_PROXY_CLIENT_SECRET=...
  #rekey.secrets.grafana-secret-key = {
  #  file = ./secrets/oauth2-proxy-client-secret.age;
  #  mode = "440";
  #  group = "oauth2_proxy";
  #};

  services.oauth2_proxy = {
    enable = true;
    cookie.secure = true;
    cookie.httpOnly = false;
    email.domains = ["*"];
    provider = "oidc";
    scope = "openid email";
    loginURL = "https://${authDomain}/ui/oauth2";
    redeemURL = "https://${authDomain}/oauth2/token";
    validateURL = "https://${authDomain}/oauth2/openid/grafana/userinfo";
    clientID = "oauth2-proxy";
    clientSecret = "";
    #keyFile = config.rekey.secrets.oauth2-proxy-client-secret.path;
    reverseProxy = true;
    extraConfig.skip-provider-button = true;
    setXauthrequest = true;
  };

  # Apply oauth by default to all locations
  services.nginx.virtualHosts = lib.genAttrs config.services.oauth2_proxy.nginx.virtualHosts (_: {
    extraConfig = ''
      auth_request /oauth2/auth;
      error_page 401 = /oauth2/sign_in;

      # pass information via X-User and X-Email headers to backend,
      # requires running with --set-xauthrequest flag
      auth_request_set $user $upstream_http_x_auth_request_user;
      auth_request_set $email $upstream_http_x_auth_request_email;
      proxy_set_header X-User $user;
      proxy_set_header X-Email $email;

      # if you enabled --cookie-refresh, this is needed for it to work with auth_request
      auth_request_set $auth_cookie $upstream_http_set_cookie;
      add_header Set-Cookie $auth_cookie;
    '';
    locations."/oauth2/".extraConfig = "auth_request off;";
    locations."/oauth2/auth".extraConfig = "auth_request off;";
  });
}
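
Note: nothing in this commit populates services.oauth2_proxy.nginx.virtualHosts yet, so the lib.genAttrs call above currently produces an empty set. A hedged sketch of how a host would opt a virtual host into the oauth2-proxy protection later; grafanaDomain stands in for whichever vhost should be protected and is not part of the commit:

  # Sketch only: list the nginx virtual hosts that should sit behind oauth2-proxy.
  services.oauth2_proxy.nginx.virtualHosts = [grafanaDomain];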
@@ -73,7 +73,6 @@ in {

  networking.nftables.firewall = {
    zones = lib.mkForce {
      #local-vms.interfaces = ["local-vms"];
      proxy-sentinel.interfaces = ["proxy-sentinel"];
      sentinel = {
        parent = "proxy-sentinel";

@@ -241,17 +240,18 @@ in {

  networking.nftables.firewall = {
    zones = lib.mkForce {
      local-vms.interfaces = ["local-vms"];
      grafana = {
        parent = "local-vms";
        ipv4Addresses = [nodes."${parentNodeName}-test".config.extra.wireguard."${parentNodeName}-local-vms".ipv4];
        ipv6Addresses = [nodes."${parentNodeName}-test".config.extra.wireguard."${parentNodeName}-local-vms".ipv6];
      #local-vms.interfaces = ["local-vms"];
      proxy-sentinel.interfaces = ["proxy-sentinel"];
      sentinel = {
        parent = "proxy-sentinel";
        ipv4Addresses = [nodes.sentinel.config.extra.wireguard.proxy-sentinel.ipv4];
        ipv6Addresses = [nodes.sentinel.config.extra.wireguard.proxy-sentinel.ipv6];
      };
    };

    rules = lib.mkForce {
      local-vms-to-local = {
        from = ["grafana"];
      sentinel-to-local = {
        from = ["sentinel"];
        to = ["local"];
        allowedTCPPorts = [3100];
      };

@@ -272,7 +272,7 @@ in {

    ingester = {
      lifecycler = {
        address = "127.0.0.1";
        interface_names = ["proxy-sentinel"];
        ring = {
          kvstore.store = "inmemory";
          replication_factor = 1;

hosts/ward/promtail.nix (new file, 91 lines)
@@ -0,0 +1,91 @@
{
  lib,
  config,
  parentNodeName,
  ...
}: {
  services.promtail = {
    enable = true;
    configuration = {
      server.http_listen_port = 9080;
      server.grpc_listen_port = 0;

      clients = [
        {
          basic_auth.username = "promtail@thalheim.io";
          basic_auth.password_file = config.sops.secrets.promtail-password.path;
          url = "http://loki.r/loki/api/v1/push";
        }
      ];

      scrape_configs = [
        {
          job_name = "journal";
          journal = {
            json = true;
            max_age = "12h";
            labels.job = "systemd-journal";
          };
          pipeline_stages = [
            {
              json.expressions = {
                transport = "_TRANSPORT";
                unit = "_SYSTEMD_UNIT";
                msg = "MESSAGE";
                coredump_cgroup = "COREDUMP_CGROUP";
                coredump_exe = "COREDUMP_EXE";
                coredump_cmdline = "COREDUMP_CMDLINE";
                coredump_uid = "COREDUMP_UID";
                coredump_gid = "COREDUMP_GID";
              };
            }
            {
              # Set the unit (defaulting to the transport like audit and kernel)
              template = {
                source = "unit";
                template = "{{if .unit}}{{.unit}}{{else}}{{.transport}}{{end}}";
              };
            }
            {
              regex = {
                expression = "(?P<coredump_unit>[^/]+)$";
                source = "coredump_cgroup";
              };
            }
            {
              template = {
                source = "msg";
                # FIXME would be cleaner to have this in a match block, but could not get it to work
                template = "{{if .coredump_exe}}{{.coredump_exe}} core dumped (user: {{.coredump_uid}}/{{.coredump_gid}}, command: {{.coredump_cmdline}}){{else}}{{.msg}}{{end}}";
              };
            }
            {
              labels.coredump_unit = "coredump_unit";
            }
            {
              # Normalize session IDs (session-1234.scope -> session.scope) to limit number of label values
              replace = {
                source = "unit";
                expression = "^(session-\\d+.scope)$";
                replace = "session.scope";
              };
            }
            {
              labels.unit = "unit";
            }
            {
              # Write the proper message instead of JSON
              output.source = "msg";
            }
          ];
          relabel_configs = [
            {
              source_labels = ["__journal__hostname"];
              target_label = "host";
            }
          ];
        }
      ];
    };
  };
}
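
Note: the clients entry above still points at an external endpoint with basic auth. A hedged sketch of what pushing through the loki virtual host defined on sentinel could look like instead; loki.example.org is a hypothetical stand-in for lokiDomain (which is derived from a secret) and is not part of the commit:

  # Sketch only: push journal logs through the TLS-terminating proxy on sentinel.
  services.promtail.configuration.clients = [
    {
      url = "https://loki.example.org/loki/api/v1/push"; # hypothetical domain
    }
  ];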