wip: prepare testing caddy over nginx with oauth2-proxy

This commit is contained in:
oddlama 2023-06-04 21:42:28 +02:00
parent 7f2f93b640
commit c5a863ce51
No known key found for this signature in database
GPG key ID: 14EFE510775FE39A
7 changed files with 245 additions and 10 deletions

View file

@ -73,7 +73,6 @@ in {
networking.nftables.firewall = {
zones = lib.mkForce {
#local-vms.interfaces = ["local-vms"];
proxy-sentinel.interfaces = ["proxy-sentinel"];
sentinel = {
parent = "proxy-sentinel";
@ -241,17 +240,18 @@ in {
networking.nftables.firewall = {
zones = lib.mkForce {
local-vms.interfaces = ["local-vms"];
grafana = {
parent = "local-vms";
ipv4Addresses = [nodes."${parentNodeName}-test".config.extra.wireguard."${parentNodeName}-local-vms".ipv4];
ipv6Addresses = [nodes."${parentNodeName}-test".config.extra.wireguard."${parentNodeName}-local-vms".ipv6];
#local-vms.interfaces = ["local-vms"];
proxy-sentinel.interfaces = ["proxy-sentinel"];
sentinel = {
parent = "proxy-sentinel";
ipv4Addresses = [nodes.sentinel.config.extra.wireguard.proxy-sentinel.ipv4];
ipv6Addresses = [nodes.sentinel.config.extra.wireguard.proxy-sentinel.ipv6];
};
};
rules = lib.mkForce {
local-vms-to-local = {
from = ["grafana"];
sentinel-to-local = {
from = ["sentinel"];
to = ["local"];
allowedTCPPorts = [3100];
};
@ -272,7 +272,7 @@ in {
ingester = {
lifecycler = {
address = "127.0.0.1";
interface_names = ["proxy-sentinel"];
ring = {
kvstore.store = "inmemory";
replication_factor = 1;

91
hosts/ward/promtail.nix Normal file
View file

@ -0,0 +1,91 @@
# Promtail: ships the local systemd journal to the central Loki instance.
{
  lib,
  config,
  parentNodeName,
  ...
}: {
  services.promtail = {
    enable = true;
    configuration = {
      # Promtail's own HTTP endpoint (metrics/readiness); gRPC disabled (port 0).
      server.http_listen_port = 9080;
      server.grpc_listen_port = 0;
      clients = [
        {
          # NOTE(review): this username references thalheim.io and looks copied
          # from another host's config — confirm it matches the Loki basic-auth
          # user provisioned for this deployment.
          basic_auth.username = "promtail@thalheim.io";
          basic_auth.password_file = config.sops.secrets.promtail-password.path;
          url = "http://loki.r/loki/api/v1/push";
        }
      ];
      scrape_configs = [
        {
          job_name = "journal";
          journal = {
            # Emit the full journal entry as JSON so pipeline stages below can
            # extract individual fields.
            json = true;
            max_age = "12h";
            labels.job = "systemd-journal";
          };
          pipeline_stages = [
            {
              # Pull the journal fields we care about out of the JSON entry.
              json.expressions = {
                transport = "_TRANSPORT";
                unit = "_SYSTEMD_UNIT";
                msg = "MESSAGE";
                coredump_cgroup = "COREDUMP_CGROUP";
                coredump_exe = "COREDUMP_EXE";
                coredump_cmdline = "COREDUMP_CMDLINE";
                coredump_uid = "COREDUMP_UID";
                coredump_gid = "COREDUMP_GID";
              };
            }
            {
              # Set the unit (defaulting to the transport like audit and kernel)
              template = {
                source = "unit";
                template = "{{if .unit}}{{.unit}}{{else}}{{.transport}}{{end}}";
              };
            }
            {
              # Extract the failing unit name from the tail of the coredump cgroup path.
              regex = {
                expression = "(?P<coredump_unit>[^/]+)$";
                source = "coredump_cgroup";
              };
            }
            {
              template = {
                source = "msg";
                # FIXME would be cleaner to have this in a match block, but could not get it to work
                template = "{{if .coredump_exe}}{{.coredump_exe}} core dumped (user: {{.coredump_uid}}/{{.coredump_gid}}, command: {{.coredump_cmdline}}){{else}}{{.msg}}{{end}}";
              };
            }
            {
              labels.coredump_unit = "coredump_unit";
            }
            {
              # Normalize session IDs (session-1234.scope -> session.scope) to limit number of label values.
              # The dot before "scope" is escaped so the pattern matches the
              # literal unit name only (previously an unescaped "." also matched
              # e.g. "session-123Xscope").
              replace = {
                source = "unit";
                expression = "^(session-\\d+\\.scope)$";
                replace = "session.scope";
              };
            }
            {
              labels.unit = "unit";
            }
            {
              # Write the proper message instead of JSON
              output.source = "msg";
            }
          ];
          relabel_configs = [
            {
              # Label every entry with the originating host.
              source_labels = ["__journal__hostname"];
              target_label = "host";
            }
          ];
        }
      ];
    };
  };
}