1
1
Fork 1
mirror of https://github.com/oddlama/nix-config.git synced 2025-10-10 23:00:39 +02:00

feat: add firezone

This commit is contained in:
oddlama 2025-03-15 12:21:21 +01:00
parent e9d1d338eb
commit 74ef27adcf
No known key found for this signature in database
GPG key ID: 14EFE510775FE39A
18 changed files with 3214 additions and 0 deletions

9
fz/modules/default.nix Normal file
View file

@ -0,0 +1,9 @@
# Aggregator module for the firezone role modules: importing this file makes
# all firezone roles (gateway, GUI client, headless client, relay, server)
# available as NixOS options.
{
  imports = [
    ./gateway.nix
    ./gui-client.nix
    ./headless-client.nix
    ./relay.nix
    ./server.nix
  ];
}

159
fz/modules/gateway.nix Normal file
View file

@ -0,0 +1,159 @@
# NixOS module for the firezone gateway: the data-plane component that
# terminates WireGuard tunnels and forwards traffic to protected resources.
{
  lib,
  pkgs,
  config,
  ...
}:
let
  inherit (lib)
    boolToString
    getExe
    mkEnableOption
    mkIf
    mkOption
    mkPackageOption
    types
    ;
  cfg = config.services.firezone.gateway;
in
{
  options = {
    services.firezone.gateway = {
      # A plain mkOption (not mkEnableOption) so the extended description
      # about manual traffic forwarding is shown verbatim.
      enable = mkOption {
        default = false;
        example = true;
        description = ''
          Whether to enable the firezone gateway.
          You have to manually masquerade and forward traffic from the
          tun-firezone interface to your resource! Refer to the
          [upstream setup script](https://github.com/firezone/firezone/blob/8c7c0a9e8e33ae790aeb75fdb5a15432c2870b79/scripts/gateway-systemd-install.sh#L154-L168)
          for a list of iptable commands.
          See the firezone nixos test in this repository for an nftables based example.
        '';
        # Use the inherited `types` like every other option in this module.
        type = types.bool;
      };
      package = mkPackageOption pkgs "firezone-gateway" { };
      name = mkOption {
        type = types.str;
        description = "The name of this gateway as shown in firezone";
      };
      apiUrl = mkOption {
        # Enforce a websocket URL with trailing slash, matching upstream's
        # expected FIREZONE_API_URL format.
        type = types.strMatching "^wss://.+/$";
        example = "wss://firezone.example.com/api/";
        description = ''
          The URL of your firezone server's API. This should be the same
          as your server's setting for {option}`services.firezone.server.settings.api.externalUrl`,
          but with `wss://` instead of `https://`.
        '';
      };
      tokenFile = mkOption {
        type = types.path;
        example = "/run/secrets/firezone-gateway-token";
        description = ''
          A file containing the firezone gateway token. Do not use a nix-store path here
          as it will make the token publicly readable!
          This file will be passed via systemd credentials, it should only be accessible
          by the root user.
        '';
      };
      logLevel = mkOption {
        type = types.str;
        default = "info";
        description = ''
          The log level for the firezone application. See
          [RUST_LOG](https://docs.rs/env_logger/latest/env_logger/#enabling-logging)
          for the format.
        '';
      };
      enableTelemetry = mkEnableOption "telemetry";
    };
  };

  config = mkIf cfg.enable {
    systemd.services.firezone-gateway = {
      description = "Gateway service for the Firezone zero-trust access platform";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];
      # uuidgen is needed to generate a persistent gateway id below.
      path = [ pkgs.util-linux ];
      script = ''
        # If FIREZONE_ID is not given by the user, use a persisted (or newly generated) uuid.
        # The id is stored in the service's StateDirectory (the working directory).
        if [[ -z "''${FIREZONE_ID:-}" ]]; then
          if [[ ! -e gateway_id ]]; then
            uuidgen -r > gateway_id
          fi
          export FIREZONE_ID=$(< gateway_id)
        fi

        # Read the token from the systemd credential, keeping it out of the
        # unit environment and the nix store.
        export FIREZONE_TOKEN=$(< "$CREDENTIALS_DIRECTORY/firezone-token")
        exec ${getExe cfg.package}
      '';
      environment = {
        FIREZONE_API_URL = cfg.apiUrl;
        FIREZONE_NAME = cfg.name;
        FIREZONE_NO_TELEMETRY = boolToString (!cfg.enableTelemetry);
        RUST_LOG = cfg.logLevel;
      };
      serviceConfig = {
        Type = "exec";
        DynamicUser = true;
        User = "firezone-gateway";
        LoadCredential = [ "firezone-token:${cfg.tokenFile}" ];
        # The gateway only needs the tun device plus CAP_NET_ADMIN to
        # configure its interface; everything else is locked down.
        DeviceAllow = "/dev/net/tun";
        AmbientCapabilities = [ "CAP_NET_ADMIN" ];
        CapabilityBoundingSet = [ "CAP_NET_ADMIN" ];
        StateDirectory = "firezone-gateway";
        WorkingDirectory = "/var/lib/firezone-gateway";
        Restart = "on-failure";
        RestartSec = 10;

        # Hardening
        LockPersonality = true;
        MemoryDenyWriteExecute = true;
        NoNewPrivileges = true;
        PrivateMounts = true;
        PrivateTmp = true;
        PrivateUsers = false;
        ProcSubset = "pid";
        ProtectClock = true;
        ProtectControlGroups = true;
        ProtectHome = true;
        ProtectHostname = true;
        ProtectKernelLogs = true;
        ProtectKernelModules = true;
        ProtectKernelTunables = true;
        ProtectProc = "invisible";
        ProtectSystem = "strict";
        RestrictAddressFamilies = [
          "AF_INET"
          "AF_INET6"
          "AF_NETLINK"
        ];
        RestrictNamespaces = true;
        RestrictRealtime = true;
        RestrictSUIDSGID = true;
        SystemCallArchitectures = "native";
        SystemCallFilter = "@system-service";
        UMask = "077";
      };
    };
  };

  meta.maintainers = with lib.maintainers; [
    oddlama
    patrickdag
  ];
}

142
fz/modules/gui-client.nix Normal file
View file

@ -0,0 +1,142 @@
# NixOS module for the firezone GUI client: installs the desktop application
# and runs the privileged IPC service that the (unprivileged) GUI talks to.
{
  lib,
  pkgs,
  config,
  ...
}:
let
  inherit (lib)
    boolToString
    getExe'
    mkEnableOption
    mkIf
    mkOption
    mkPackageOption
    types
    ;
  cfg = config.services.firezone.gui-client;
in
{
  options = {
    services.firezone.gui-client = {
      enable = mkEnableOption "the firezone gui client";
      package = mkPackageOption pkgs "firezone-gui-client" { };
      allowedUsers = mkOption {
        type = types.listOf types.str;
        default = [ ];
        description = ''
          All listed users will become part of the `firezone-client` group so
          they can control the IPC service. This is a convenience option.
        '';
      };
      name = mkOption {
        type = types.str;
        description = "The name of this client as shown in firezone";
      };
      logLevel = mkOption {
        type = types.str;
        default = "info";
        description = ''
          The log level for the firezone application. See
          [RUST_LOG](https://docs.rs/env_logger/latest/env_logger/#enabling-logging)
          for the format.
        '';
      };
      enableTelemetry = mkEnableOption "telemetry";
    };
  };
  config = mkIf cfg.enable {
    # Membership in this group grants access to the IPC service socket.
    users.groups.firezone-client.members = cfg.allowedUsers;
    # Required for deep-link mimetype registration
    environment.systemPackages = [ cfg.package ];
    # Required for the token store in the gui application
    services.gnome.gnome-keyring.enable = true;
    systemd.services.firezone-ipc-service = {
      description = "GUI IPC service for the Firezone zero-trust access platform";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];
      # uuidgen is needed to generate a persistent client id below.
      path = [ pkgs.util-linux ];
      script = ''
        # If FIREZONE_ID is not given by the user, use a persisted (or newly generated) uuid.
        # The id is stored in the service's StateDirectory (the working directory).
        if [[ -z "''${FIREZONE_ID:-}" ]]; then
          if [[ ! -e client_id ]]; then
            uuidgen -r > client_id
          fi
          export FIREZONE_ID=$(< client_id)
        fi
        # run-debug logs to stdout instead of file, which is preferable
        exec ${getExe' cfg.package "firezone-client-ipc"} run-debug
      '';
      environment = {
        FIREZONE_NAME = cfg.name;
        FIREZONE_NO_TELEMETRY = boolToString (!cfg.enableTelemetry);
        # %L expands to the logs directory root (/var/log).
        LOG_DIR = "%L/dev.firezone.client";
        RUST_LOG = cfg.logLevel;
      };
      serviceConfig = {
        Type = "notify";
        DeviceAllow = "/dev/net/tun";
        AmbientCapabilities = [ "CAP_NET_ADMIN" ];
        CapabilityBoundingSet = [ "CAP_NET_ADMIN" ];
        # This block contains hardcoded values in the client, we cannot change these :(
        Group = "firezone-client";
        RuntimeDirectory = "dev.firezone.client";
        StateDirectory = "dev.firezone.client";
        WorkingDirectory = "/var/lib/dev.firezone.client";
        LogsDirectory = "dev.firezone.client";
        Restart = "on-failure";
        RestartSec = 10;
        # Hardening; AF_UNIX is required here (unlike the gateway) for the
        # local IPC socket the GUI connects to.
        LockPersonality = true;
        MemoryDenyWriteExecute = true;
        NoNewPrivileges = true;
        PrivateMounts = true;
        PrivateTmp = true;
        PrivateUsers = false;
        ProcSubset = "pid";
        ProtectClock = true;
        ProtectControlGroups = true;
        ProtectHome = true;
        ProtectHostname = true;
        ProtectKernelLogs = true;
        ProtectKernelModules = true;
        ProtectKernelTunables = true;
        ProtectProc = "invisible";
        ProtectSystem = "strict";
        RestrictAddressFamilies = [
          "AF_INET"
          "AF_INET6"
          "AF_NETLINK"
          "AF_UNIX"
        ];
        RestrictNamespaces = true;
        RestrictRealtime = true;
        RestrictSUIDSGID = true;
        SystemCallArchitectures = "native";
        SystemCallFilter = "@system-service";
        UMask = "077";
      };
    };
  };
  meta.maintainers = with lib.maintainers; [
    oddlama
    patrickdag
  ];
}

View file

@ -0,0 +1,148 @@
# NixOS module for the firezone headless client: a daemon that connects this
# machine to firezone without any GUI, authenticated via a service token.
{
  lib,
  pkgs,
  config,
  ...
}:
let
  inherit (lib)
    boolToString
    getExe
    mkEnableOption
    mkIf
    mkOption
    mkPackageOption
    types
    ;
  cfg = config.services.firezone.headless-client;
in
{
  options = {
    services.firezone.headless-client = {
      enable = mkEnableOption "the firezone headless client";
      package = mkPackageOption pkgs "firezone-headless-client" { };
      name = mkOption {
        type = types.str;
        description = "The name of this client as shown in firezone";
      };
      apiUrl = mkOption {
        # Enforce a websocket URL with trailing slash, matching upstream's
        # expected FIREZONE_API_URL format.
        type = types.strMatching "^wss://.+/$";
        example = "wss://firezone.example.com/api/";
        description = ''
          The URL of your firezone server's API. This should be the same
          as your server's setting for {option}`services.firezone.server.settings.api.externalUrl`,
          but with `wss://` instead of `https://`.
        '';
      };
      tokenFile = mkOption {
        type = types.path;
        example = "/run/secrets/firezone-client-token";
        description = ''
          A file containing the firezone client token. Do not use a nix-store path here
          as it will make the token publicly readable!
          This file will be passed via systemd credentials, it should only be accessible
          by the root user.
        '';
      };
      logLevel = mkOption {
        type = types.str;
        default = "info";
        description = ''
          The log level for the firezone application. See
          [RUST_LOG](https://docs.rs/env_logger/latest/env_logger/#enabling-logging)
          for the format.
        '';
      };
      enableTelemetry = mkEnableOption "telemetry";
    };
  };
  config = mkIf cfg.enable {
    systemd.services.firezone-headless-client = {
      description = "headless client service for the Firezone zero-trust access platform";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];
      # uuidgen is needed to generate a persistent client id below.
      path = [ pkgs.util-linux ];
      script = ''
        # If FIREZONE_ID is not given by the user, use a persisted (or newly generated) uuid.
        # The id is stored in the service's StateDirectory (the working directory).
        if [[ -z "''${FIREZONE_ID:-}" ]]; then
          if [[ ! -e client_id ]]; then
            uuidgen -r > client_id
          fi
          export FIREZONE_ID=$(< client_id)
        fi
        exec ${getExe cfg.package}
      '';
      environment = {
        FIREZONE_API_URL = cfg.apiUrl;
        FIREZONE_NAME = cfg.name;
        FIREZONE_NO_TELEMETRY = boolToString (!cfg.enableTelemetry);
        # %d expands to the credentials directory; the client reads the token
        # file directly instead of receiving it via environment variable.
        FIREZONE_TOKEN_PATH = "%d/firezone-token";
        # %L expands to the logs directory root (/var/log).
        LOG_DIR = "%L/dev.firezone.client";
        RUST_LOG = cfg.logLevel;
      };
      serviceConfig = {
        Type = "exec";
        # NOTE(review): unlike the gateway service this runs without
        # DynamicUser (i.e. as root) — presumably required by the client's
        # hardcoded paths/privileges; confirm upstream before tightening.
        LoadCredential = [ "firezone-token:${cfg.tokenFile}" ];
        DeviceAllow = "/dev/net/tun";
        AmbientCapabilities = [ "CAP_NET_ADMIN" ];
        CapabilityBoundingSet = [ "CAP_NET_ADMIN" ];
        # Hardcoded values in the client :(
        RuntimeDirectory = "dev.firezone.client";
        StateDirectory = "dev.firezone.client";
        WorkingDirectory = "/var/lib/dev.firezone.client";
        LogsDirectory = "dev.firezone.client";
        Restart = "on-failure";
        RestartSec = 10;
        # Hardening
        LockPersonality = true;
        MemoryDenyWriteExecute = true;
        NoNewPrivileges = true;
        PrivateMounts = true;
        PrivateTmp = true;
        PrivateUsers = false;
        ProcSubset = "pid";
        ProtectClock = true;
        ProtectControlGroups = true;
        ProtectHome = true;
        ProtectHostname = true;
        ProtectKernelLogs = true;
        ProtectKernelModules = true;
        ProtectKernelTunables = true;
        ProtectProc = "invisible";
        ProtectSystem = "strict";
        RestrictAddressFamilies = [
          "AF_INET"
          "AF_INET6"
          "AF_NETLINK"
          "AF_UNIX"
        ];
        RestrictNamespaces = true;
        RestrictRealtime = true;
        RestrictSUIDSGID = true;
        SystemCallArchitectures = "native";
        SystemCallFilter = "@system-service";
        UMask = "077";
      };
    };
  };
  meta.maintainers = with lib.maintainers; [
    oddlama
    patrickdag
  ];
}

709
fz/modules/provision.exs Normal file
View file

@ -0,0 +1,709 @@
defmodule Provision do
  # Declaratively provisions a firezone server: reads the desired state from
  # provision-state.json, diffs it against UUIDs persisted from previous runs
  # in provision-uuids.json, and creates/updates/deletes accounts, actors,
  # auth providers, gateway/relay/actor groups, resources and policies inside
  # a single Ecto.Multi transaction (all-or-nothing).
  alias Domain.{Repo, Accounts, Auth, Actors, Resources, Tokens, Gateways, Relays, Policies}
  require Logger

  # UUID Mapping handling
  defmodule UuidMapping do
    # Persists the mapping from stable external ids (keys from the
    # provisioning JSON) to the database UUIDs created for them, so repeated
    # runs can find previously created entities. The live mapping is cached
    # in the process dictionary under :uuid_mappings.
    @mapping_file "provision-uuids.json"

    # Loads the mapping from file
    def load do
      # Any read/decode failure falls back to an empty mapping.
      mappings = case File.read(@mapping_file) do
        {:ok, content} ->
          case Jason.decode(content) do
            {:ok, mapping} -> mapping
            _ -> %{"accounts" => %{}}
          end
        _ -> %{"accounts" => %{}}
      end
      Process.put(:uuid_mappings, mappings)
      mappings
    end

    # Saves the current mapping (defaulting to the one in the process dictionary)
    def save(mapping \\ Process.get(:uuid_mappings)) do
      File.write!(@mapping_file, Jason.encode!(mapping))
    end

    # Retrieves the account-level mapping from a given mapping (or from Process)
    def get_account(mapping \\ Process.get(:uuid_mappings), account_slug) do
      get_in(mapping, ["accounts", account_slug]) || %{}
    end

    # Retrieves the entity mapping for a specific account and type
    def get_entities(mapping \\ Process.get(:uuid_mappings), account_slug, type) do
      get_in(mapping, ["accounts", account_slug, type]) || %{}
    end

    # Retrieves an entity mapping for a specific account, type and external_id
    def get_entity(mapping \\ Process.get(:uuid_mappings), account_slug, type, external_id) do
      get_in(mapping, ["accounts", account_slug, type, external_id])
    end

    # Updates (or creates) the account UUID mapping and stores it in the process dictionary.
    def update_account(account_slug, uuid) do
      mapping = Process.get(:uuid_mappings) || load()
      mapping = ensure_account_exists(mapping, account_slug)
      mapping = put_in(mapping, ["accounts", account_slug, "id"], uuid)
      Process.put(:uuid_mappings, mapping)
      mapping
    end

    # Ensures that the given account exists in the mapping.
    def ensure_account_exists(mapping, account_slug) do
      if not Map.has_key?(mapping["accounts"], account_slug) do
        put_in(mapping, ["accounts", account_slug], %{})
      else
        mapping
      end
    end

    # Updates (or creates) the mapping for entities of a given type for the account.
    def update_entities(account_slug, type, new_entries) do
      mapping = Process.get(:uuid_mappings) || load()
      mapping = ensure_account_exists(mapping, account_slug)
      current = get_entities(mapping, account_slug, type)
      mapping = put_in(mapping, ["accounts", account_slug, type], Map.merge(current, new_entries))
      Process.put(:uuid_mappings, mapping)
      mapping
    end

    # Removes an entire account from the mapping.
    def remove_account(account_slug) do
      mapping = Process.get(:uuid_mappings) || load()
      mapping = update_in(mapping, ["accounts"], fn accounts ->
        Map.delete(accounts, account_slug)
      end)
      Process.put(:uuid_mappings, mapping)
      mapping
    end

    # Removes a specific entity mapping for the account.
    def remove_entity(account_slug, type, key) do
      mapping = Process.get(:uuid_mappings) || load()
      mapping = update_in(mapping, ["accounts", account_slug, type], fn entities ->
        Map.delete(entities || %{}, key)
      end)
      Process.put(:uuid_mappings, mapping)
      mapping
    end
  end

  # Recursively replaces {env:VAR} placeholders in strings (anywhere in a
  # nested map/list structure) with the value of the corresponding
  # environment variable; raises if the variable is unset.
  defp resolve_references(value) when is_map(value) do
    Enum.into(value, %{}, fn {k, v} -> {k, resolve_references(v)} end)
  end
  defp resolve_references(value) when is_list(value) do
    Enum.map(value, &resolve_references/1)
  end
  defp resolve_references(value) when is_binary(value) do
    Regex.replace(~r/\{env:([^}]+)\}/, value, fn _, var ->
      System.get_env(var) || raise "Environment variable #{var} not set"
    end)
  end
  defp resolve_references(value), do: value

  # Converts string keys of a (nested) map to atoms, as expected by the
  # Domain changeset APIs.
  defp atomize_keys(map) when is_map(map) do
    Enum.into(map, %{}, fn {k, v} ->
      {
        if(is_binary(k), do: String.to_atom(k), else: k),
        if(is_map(v), do: atomize_keys(v), else: v)
      }
    end)
  end

  # --- Cleanup helpers: each deletes one entity by UUID if it still exists
  # (and, where checked, is not already soft-deleted); otherwise no-op. ---

  # Accounts are soft-deleted by setting deleted_at instead of hard deletion.
  defp cleanup_account(uuid) do
    case Accounts.fetch_account_by_id_or_slug(uuid) do
      {:ok, value} when value.deleted_at == nil ->
        Logger.info("Deleting removed account #{value.slug}")
        value |> Ecto.Changeset.change(%{ deleted_at: DateTime.utc_now() }) |> Repo.update!()
      _ -> :ok
    end
  end

  defp cleanup_actor(uuid, subject) do
    case Actors.fetch_actor_by_id(uuid, subject) do
      {:ok, value} ->
        Logger.info("Deleting removed actor #{value.name}")
        {:ok, _} = Actors.delete_actor(value, subject)
      _ -> :ok
    end
  end

  defp cleanup_provider(uuid, subject) do
    case Auth.fetch_provider_by_id(uuid, subject) do
      {:ok, value} ->
        Logger.info("Deleting removed provider #{value.name}")
        {:ok, _} = Auth.delete_provider(value, subject)
      _ -> :ok
    end
  end

  defp cleanup_gateway_group(uuid, subject) do
    case Gateways.fetch_group_by_id(uuid, subject) do
      {:ok, value} ->
        Logger.info("Deleting removed gateway group #{value.name}")
        {:ok, _} = Gateways.delete_group(value, subject)
      _ -> :ok
    end
  end

  defp cleanup_relay_group(uuid, subject) do
    case Relays.fetch_group_by_id(uuid, subject) do
      {:ok, value} ->
        Logger.info("Deleting removed relay group #{value.name}")
        {:ok, _} = Relays.delete_group(value, subject)
      _ -> :ok
    end
  end

  defp cleanup_actor_group(uuid, subject) do
    case Actors.fetch_group_by_id(uuid, subject) do
      {:ok, value} ->
        Logger.info("Deleting removed actor group #{value.name}")
        {:ok, _} = Actors.delete_group(value, subject)
      _ -> :ok
    end
  end

  # Fetch resource by uuid, but follow the chain of replacements if any
  # (updating certain attributes replaces a resource with a new row).
  defp fetch_resource(uuid, subject) do
    case Resources.fetch_resource_by_id(uuid, subject) do
      {:ok, resource} when resource.replaced_by_resource_id != nil -> fetch_resource(resource.replaced_by_resource_id, subject)
      v -> v
    end
  end

  defp cleanup_resource(uuid, subject) do
    case fetch_resource(uuid, subject) do
      {:ok, value} when value.deleted_at == nil ->
        Logger.info("Deleting removed resource #{value.name}")
        {:ok, _} = Resources.delete_resource(value, subject)
      _ -> :ok
    end
  end

  # Fetch policy by uuid, but follow the chain of replacements if any
  defp fetch_policy(uuid, subject) do
    case Policies.fetch_policy_by_id(uuid, subject) do
      {:ok, policy} when policy.replaced_by_policy_id != nil -> fetch_policy(policy.replaced_by_policy_id, subject)
      v -> v
    end
  end

  defp cleanup_policy(uuid, subject) do
    case fetch_policy(uuid, subject) do
      {:ok, value} when value.deleted_at == nil ->
        Logger.info("Deleting removed policy #{value.description}")
        {:ok, _} = Policies.delete_policy(value, subject)
      _ -> :ok
    end
  end

  # Deletes all previously provisioned entities of one type that are no
  # longer present in the desired state (read from :current_entities in the
  # process dictionary) and drops them from the UUID mapping.
  defp cleanup_entity_type(account_slug, entity_type, cleanup_fn, temp_admin_subject) do
    # Get mapping for this entity type
    existing_entities = UuidMapping.get_entities(account_slug, entity_type)
    # Get current entities from account data
    current_entities = Process.get(:current_entities)
    # Determine which ones to remove
    removed_entity_ids = Map.keys(existing_entities) -- (current_entities[entity_type] || [])
    # Process each entity to remove
    Enum.each(removed_entity_ids, fn entity_id ->
      case existing_entities[entity_id] do
        nil -> :ok
        uuid ->
          cleanup_fn.(uuid, temp_admin_subject)
          UuidMapping.remove_entity(account_slug, entity_type, entity_id)
      end
    end)
  end

  # Collects the external ids of all entities in the desired state, keyed by
  # entity type. "everyone" is always present (auto-created managed group).
  defp collect_current_entities(account_data) do
    %{
      "actors" => Map.keys(account_data["actors"] || %{}),
      "providers" => Map.keys(account_data["auth"] || %{}),
      "gateway_groups" => Map.keys(account_data["gatewayGroups"] || %{}),
      "relay_groups" => Map.keys(account_data["relayGroups"] || %{}),
      "actor_groups" => Map.keys(account_data["groups"] || %{}) ++ ["everyone"],
      "resources" => Map.keys(account_data["resources"] || %{}),
      "policies" => Map.keys(account_data["policies"] || %{})
    }
  end

  # Normalizes fetch results: treats not-found and soft-deleted entities the
  # same as nil so callers can decide to (re)create them.
  defp nil_if_deleted_or_not_found(value) do
    case value do
      nil -> nil
      {:error, :not_found} -> nil
      {:ok, value} when value.deleted_at != nil -> nil
      v -> v
    end
  end

  # Creates a short-lived admin actor + identity + token and builds an auth
  # subject from it, since most Domain APIs require an authorized subject.
  # The caller must tear this down again via cleanup_temp_admin/4.
  defp create_temp_admin(account, email_provider) do
    temp_admin_actor_email = "firezone-provision@localhost.local"
    temp_admin_actor_context = %Auth.Context{
      type: :browser,
      user_agent: "Unspecified/0.0",
      remote_ip: {127, 0, 0, 1},
      remote_ip_location_region: "N/A",
      remote_ip_location_city: "N/A",
      remote_ip_location_lat: 0.0,
      remote_ip_location_lon: 0.0
    }
    {:ok, temp_admin_actor} =
      Actors.create_actor(account, %{
        type: :account_admin_user,
        name: "Provisioning"
      })
    {:ok, temp_admin_actor_email_identity} =
      Auth.create_identity(temp_admin_actor, email_provider, %{
        provider_identifier: temp_admin_actor_email,
        provider_identifier_confirmation: temp_admin_actor_email
      })
    # Token is valid for one hour, which bounds the provisioning run.
    {:ok, temp_admin_actor_token} =
      Auth.create_token(temp_admin_actor_email_identity, temp_admin_actor_context, "temporarynonce", DateTime.utc_now() |> DateTime.add(1, :hour))
    {:ok, temp_admin_subject} =
      Auth.build_subject(temp_admin_actor_token, temp_admin_actor_context)
    {temp_admin_subject, temp_admin_actor, temp_admin_actor_email_identity, temp_admin_actor_token}
  end

  # Deletes the temporary admin's token, identity and actor again.
  defp cleanup_temp_admin(temp_admin_actor, temp_admin_actor_email_identity, temp_admin_actor_token, subject) do
    Logger.info("Cleaning up temporary admin actor")
    {:ok, _} = Tokens.delete_token(temp_admin_actor_token, subject)
    {:ok, _} = Auth.delete_identity(temp_admin_actor_email_identity, subject)
    {:ok, _} = Actors.delete_actor(temp_admin_actor, subject)
  end

  # Entry point: builds one Ecto.Multi covering all accounts and runs it in a
  # single transaction; on failure nothing is applied to the database (note:
  # the UUID-mapping file and account soft-deletes above happen outside it).
  def provision() do
    Logger.info("Starting provisioning")
    # Load desired state
    json_file = "provision-state.json"
    {:ok, raw_json} = File.read(json_file)
    {:ok, %{"accounts" => accounts}} = Jason.decode(raw_json)
    accounts = resolve_references(accounts)
    # Load existing UUID mappings into the process dictionary.
    UuidMapping.load()
    # Clean up removed accounts first
    current_account_slugs = Map.keys(accounts)
    existing_accounts = Map.keys(Process.get(:uuid_mappings)["accounts"])
    removed_accounts = existing_accounts -- current_account_slugs
    Enum.each(removed_accounts, fn slug ->
      if uuid = get_in(Process.get(:uuid_mappings), ["accounts", slug, "id"]) do
        cleanup_account(uuid)
        # Remove the account from the UUID mapping.
        UuidMapping.remove_account(slug)
      end
    end)
    # One Multi step per (step-name, slug[, external_id]) tuple; later steps
    # read earlier results (account, email provider, temp admin) via changes.
    multi = Enum.reduce(accounts, Ecto.Multi.new(), fn {slug, account_data}, multi ->
      account_attrs = atomize_keys(%{
        name: account_data["name"],
        slug: slug,
        features: Map.get(account_data, "features", %{}),
        metadata: Map.get(account_data, "metadata", %{}),
        limits: Map.get(account_data, "limits", %{})
      })
      multi = multi
      |> Ecto.Multi.run({:account, slug}, fn repo, _changes ->
        case Accounts.fetch_account_by_id_or_slug(slug) do
          {:ok, acc} ->
            Logger.info("Updating existing account #{slug}")
            updated_acc = acc |> Ecto.Changeset.change(account_attrs) |> repo.update!()
            {:ok, {:existing, updated_acc}}
          _ ->
            Logger.info("Creating new account #{slug}")
            {:ok, account} = Accounts.create_account(account_attrs)
            Logger.info("Creating internet gateway group")
            {:ok, internet_site} = Gateways.create_internet_group(account)
            Logger.info("Creating internet resource")
            {:ok, _internet_resource} = Resources.create_internet_resource(account, internet_site)
            # Store mapping of slug to UUID
            UuidMapping.update_account(slug, account.id)
            {:ok, {:new, account}}
        end
      end)
      |> Ecto.Multi.run({:everyone_group, slug}, fn _repo, changes ->
        case Map.get(changes, {:account, slug}) do
          {:new, account} ->
            Logger.info("Creating everyone group for new account")
            {:ok, actor_group} = Actors.create_managed_group(account, %{name: "Everyone", membership_rules: [%{operator: true}]})
            UuidMapping.update_entities(slug, "actor_groups", %{"everyone" => actor_group.id})
            {:ok, actor_group}
          {:existing, _account} ->
            {:ok, :skipped}
        end
      end)
      |> Ecto.Multi.run({:email_provider, slug}, fn _repo, changes ->
        case Map.get(changes, {:account, slug}) do
          {:new, account} ->
            Logger.info("Creating default email provider for new account")
            Auth.create_provider(account, %{name: "Email", adapter: :email, adapter_config: %{}})
          {:existing, account} ->
            # For existing accounts, look up the already-created email provider.
            Auth.Provider.Query.not_disabled()
            |> Auth.Provider.Query.by_adapter(:email)
            |> Auth.Provider.Query.by_account_id(account.id)
            |> Repo.fetch(Auth.Provider.Query, [])
        end
      end)
      |> Ecto.Multi.run({:temp_admin, slug}, fn _repo, changes ->
        {_, account} = changes[{:account, slug}]
        email_provider = changes[{:email_provider, slug}]
        {:ok, create_temp_admin(account, email_provider)}
      end)
      # Clean up removed entities for this account after we have an admin subject
      multi = multi
      |> Ecto.Multi.run({:cleanup_entities, slug}, fn _repo, changes ->
        {temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}]
        # Store current entities in process dictionary for our helper function
        current_entities = collect_current_entities(account_data)
        Process.put(:current_entities, current_entities)
        # Define entity types and their cleanup functions
        entity_types = [
          {"actors", &cleanup_actor/2},
          {"providers", &cleanup_provider/2},
          {"gateway_groups", &cleanup_gateway_group/2},
          {"relay_groups", &cleanup_relay_group/2},
          {"actor_groups", &cleanup_actor_group/2},
          {"resources", &cleanup_resource/2},
          {"policies", &cleanup_policy/2}
        ]
        # Clean up each entity type
        Enum.each(entity_types, fn {entity_type, cleanup_fn} ->
          cleanup_entity_type(slug, entity_type, cleanup_fn, temp_admin_subject)
        end)
        {:ok, :cleaned}
      end)
      # Create or update actors
      multi = Enum.reduce(account_data["actors"] || %{}, multi, fn {external_id, actor_data}, multi ->
        actor_attrs = atomize_keys(%{
          name: actor_data["name"],
          type: String.to_atom(actor_data["type"])
        })
        Ecto.Multi.run(multi, {:actor, slug, external_id}, fn _repo, changes ->
          {_, account} = changes[{:account, slug}]
          {temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}]
          uuid = UuidMapping.get_entity(slug, "actors", external_id)
          case uuid && Actors.fetch_actor_by_id(uuid, temp_admin_subject) |> nil_if_deleted_or_not_found() do
            nil ->
              Logger.info("Creating new actor #{actor_data["name"]}")
              {:ok, actor} = Actors.create_actor(account, actor_attrs)
              # Update the mapping without manually handling Process.get/put.
              UuidMapping.update_entities(slug, "actors", %{external_id => actor.id})
              {:ok, {:new, actor}}
            {:ok, existing_actor} ->
              Logger.info("Updating existing actor #{actor_data["name"]}")
              {:ok, updated_act} = Actors.update_actor(existing_actor, actor_attrs, temp_admin_subject)
              {:ok, {:existing, updated_act}}
          end
        end)
        |> Ecto.Multi.run({:actor_identity, slug, external_id}, fn repo, changes ->
          email_provider = changes[{:email_provider, slug}]
          case Map.get(changes, {:actor, slug, external_id}) do
            {:new, actor} ->
              Logger.info("Creating actor email identity")
              Auth.create_identity(actor, email_provider, %{
                provider_identifier: actor_data["email"],
                provider_identifier_confirmation: actor_data["email"]
              })
            {:existing, actor} ->
              Logger.info("Updating actor email identity")
              {:ok, identity} = Auth.Identity.Query.not_deleted()
              |> Auth.Identity.Query.by_actor_id(actor.id)
              |> Auth.Identity.Query.by_provider_id(email_provider.id)
              |> Repo.fetch(Auth.Identity.Query, [])
              {:ok, identity |> Ecto.Changeset.change(%{
                provider_identifier: actor_data["email"]
              }) |> repo.update!()}
          end
        end)
      end)
      # Create or update providers
      multi = Enum.reduce(account_data["auth"] || %{}, multi, fn {external_id, provider_data}, multi ->
        Ecto.Multi.run(multi, {:provider, slug, external_id}, fn repo, changes ->
          provider_attrs = %{
            name: provider_data["name"],
            adapter: String.to_atom(provider_data["adapter"]),
            adapter_config: provider_data["adapter_config"]
          }
          {_, account} = changes[{:account, slug}]
          {temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}]
          uuid = UuidMapping.get_entity(slug, "providers", external_id)
          case uuid && Auth.fetch_provider_by_id(uuid, temp_admin_subject) |> nil_if_deleted_or_not_found() do
            nil ->
              Logger.info("Creating new provider #{provider_data["name"]}")
              {:ok, provider} = Auth.create_provider(account, provider_attrs)
              UuidMapping.update_entities(slug, "providers", %{external_id => provider.id})
              {:ok, provider}
            {:ok, existing} ->
              Logger.info("Updating existing provider #{provider_data["name"]}")
              {:ok, existing |> Ecto.Changeset.change(provider_attrs) |> repo.update!()}
          end
        end)
      end)
      # Create or update gateway_groups
      multi = Enum.reduce(account_data["gatewayGroups"] || %{}, multi, fn {external_id, gateway_group_data}, multi ->
        Ecto.Multi.run(multi, {:gateway_group, slug, external_id}, fn _repo, changes ->
          gateway_group_attrs = %{
            name: gateway_group_data["name"],
            tokens: [%{}]
          }
          {_, account} = changes[{:account, slug}]
          {temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}]
          uuid = UuidMapping.get_entity(slug, "gateway_groups", external_id)
          case uuid && Gateways.fetch_group_by_id(uuid, temp_admin_subject) |> nil_if_deleted_or_not_found() do
            nil ->
              Logger.info("Creating new gateway group #{gateway_group_data["name"]}")
              gateway_group = account
              |> Gateways.Group.Changeset.create(gateway_group_attrs, temp_admin_subject)
              |> Repo.insert!()
              UuidMapping.update_entities(slug, "gateway_groups", %{external_id => gateway_group.id})
              {:ok, gateway_group}
            {:ok, existing} ->
              # Nothing to update
              {:ok, existing}
          end
        end)
      end)
      # Create or update relay_groups
      multi = Enum.reduce(account_data["relayGroups"] || %{}, multi, fn {external_id, relay_group_data}, multi ->
        Ecto.Multi.run(multi, {:relay_group, slug, external_id}, fn _repo, changes ->
          relay_group_attrs = %{
            name: relay_group_data["name"]
          }
          {temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}]
          uuid = UuidMapping.get_entity(slug, "relay_groups", external_id)
          existing_relay_group = uuid && Relays.fetch_group_by_id(uuid, temp_admin_subject)
          case existing_relay_group do
            v when v in [nil, {:error, :not_found}] ->
              Logger.info("Creating new relay group #{relay_group_data["name"]}")
              {:ok, relay_group} = Relays.create_group(relay_group_attrs, temp_admin_subject)
              UuidMapping.update_entities(slug, "relay_groups", %{external_id => relay_group.id})
              {:ok, relay_group}
            {:ok, existing} ->
              # Nothing to update
              {:ok, existing}
          end
        end)
      end)
      # Create or update actor_groups
      multi = Enum.reduce(account_data["groups"] || %{}, multi, fn {external_id, actor_group_data}, multi ->
        Ecto.Multi.run(multi, {:actor_group, slug, external_id}, fn _repo, changes ->
          actor_group_attrs = %{
            name: actor_group_data["name"],
            type: :static
          }
          {temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}]
          uuid = UuidMapping.get_entity(slug, "actor_groups", external_id)
          case uuid && Actors.fetch_group_by_id(uuid, temp_admin_subject) |> nil_if_deleted_or_not_found() do
            nil ->
              Logger.info("Creating new actor group #{actor_group_data["name"]}")
              {:ok, actor_group} = Actors.create_group(actor_group_attrs, temp_admin_subject)
              UuidMapping.update_entities(slug, "actor_groups", %{external_id => actor_group.id})
              {:ok, actor_group}
            {:ok, existing} ->
              # Nothing to update
              {:ok, existing}
          end
        end)
        |> Ecto.Multi.run({:actor_group_members, slug, external_id}, fn repo, changes ->
          # Reconcile desired members against current memberships; only
          # removes untracked members when "forceMembers" is set.
          {_, account} = changes[{:account, slug}]
          group_uuid = UuidMapping.get_entity(slug, "actor_groups", external_id)
          memberships =
            Actors.Membership.Query.all()
            |> Actors.Membership.Query.by_group_id(group_uuid)
            |> Actors.Membership.Query.returning_all()
            |> Repo.all()
          existing_members = Enum.map(memberships, fn membership -> membership.actor_id end)
          desired_members = Enum.map(actor_group_data["members"] || [], fn member ->
            uuid = UuidMapping.get_entity(slug, "actors", member)
            if uuid == nil do
              raise "Cannot find provisioned actor #{member} to add to group"
            end
            uuid
          end)
          missing_members = desired_members -- existing_members
          untracked_members = existing_members -- desired_members
          Logger.info("Updating members for actor group #{external_id}")
          Enum.each(missing_members || [], fn actor_uuid ->
            Logger.info("Adding member #{external_id}")
            Actors.Membership.Changeset.upsert(account.id, %Actors.Membership{}, %{
              group_id: group_uuid,
              actor_id: actor_uuid
            })
            |> repo.insert!()
          end)
          if actor_group_data["forceMembers"] == true do
            # Remove untracked members
            to_delete = Enum.map(untracked_members, fn actor_uuid -> {group_uuid, actor_uuid} end)
            if to_delete != [] do
              Actors.Membership.Query.by_group_id_and_actor_id({:in, to_delete})
              |> repo.delete_all()
            end
          end
          {:ok, nil}
        end)
      end)
      # Create or update resources
      multi = Enum.reduce(account_data["resources"] || %{}, multi, fn {external_id, resource_data}, multi ->
        Ecto.Multi.run(multi, {:resource, slug, external_id}, fn _repo, changes ->
          resource_attrs = %{
            type: String.to_atom(resource_data["type"]),
            name: resource_data["name"],
            address: resource_data["address"],
            address_description: resource_data["address_description"],
            connections: Enum.map(resource_data["gatewayGroups"] || [], fn group ->
              %{gateway_group_id: UuidMapping.get_entity(slug, "gateway_groups", group)}
            end),
            filters: Enum.map(resource_data["filters"] || [], fn filter ->
              %{
                ports: filter["ports"] || [],
                protocol: String.to_atom(filter["protocol"])
              }
            end)
          }
          {temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}]
          uuid = UuidMapping.get_entity(slug, "resources", external_id)
          case uuid && fetch_resource(uuid, temp_admin_subject) |> nil_if_deleted_or_not_found() do
            nil ->
              Logger.info("Creating new resource #{resource_data["name"]}")
              {:ok, resource} = Resources.create_resource(resource_attrs, temp_admin_subject)
              UuidMapping.update_entities(slug, "resources", %{external_id => resource.id})
              {:ok, resource}
            {:ok, existing} ->
              existing = Repo.preload(existing, :connections)
              Logger.info("Updating existing resource #{resource_data["name"]}")
              # Only touch the resource when an attribute actually changed,
              # to avoid needless replacement rows.
              only_updated_attrs = resource_attrs
              |> Enum.reject(fn {key, value} ->
                case key do
                  # Compare connections by gateway_group_id only
                  :connections -> value == Enum.map(existing.connections || [], fn conn -> Map.take(conn, [:gateway_group_id]) end)
                  # Compare filters by ports and protocol only
                  :filters -> value == Enum.map(existing.filters || [], fn filter -> Map.take(filter, [:ports, :protocol]) end)
                  _ -> Map.get(existing, key) == value
                end
              end)
              |> Enum.into(%{})
              if only_updated_attrs == %{} do
                {:ok, existing}
              else
                # A replacement yields a new UUID which must be re-recorded.
                resource = case existing |> Resources.update_or_replace_resource(resource_attrs, temp_admin_subject) do
                  {:replaced, _old, new} ->
                    UuidMapping.update_entities(slug, "resources", %{external_id => new.id})
                    new
                  {:updated, value} -> value
                  x -> x
                end
                {:ok, resource}
              end
          end
        end)
      end)
      # Create or update policies
      multi = Enum.reduce(account_data["policies"] || %{}, multi, fn {external_id, policy_data}, multi ->
        Ecto.Multi.run(multi, {:policy, slug, external_id}, fn _repo, changes ->
          policy_attrs = %{
            description: policy_data["description"],
            actor_group_id: UuidMapping.get_entity(slug, "actor_groups", policy_data["group"]),
            resource_id: UuidMapping.get_entity(slug, "resources", policy_data["resource"])
          }
          {temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}]
          uuid = UuidMapping.get_entity(slug, "policies", external_id)
          case uuid && fetch_policy(uuid, temp_admin_subject) |> nil_if_deleted_or_not_found() do
            nil ->
              Logger.info("Creating new policy #{policy_data["name"]}")
              {:ok, policy} = Policies.create_policy(policy_attrs, temp_admin_subject)
              UuidMapping.update_entities(slug, "policies", %{external_id => policy.id})
              {:ok, policy}
            {:ok, existing} ->
              Logger.info("Updating existing policy #{policy_data["name"]}")
              only_updated_attrs = policy_attrs
              |> Enum.reject(fn {key, value} -> Map.get(existing, key) == value end)
              |> Enum.into(%{})
              if only_updated_attrs == %{} do
                {:ok, existing}
              else
                # A replacement yields a new UUID which must be re-recorded.
                policy = case existing |> Policies.update_or_replace_policy(policy_attrs, temp_admin_subject) do
                  {:replaced, _old, new} ->
                    UuidMapping.update_entities(slug, "policies", %{external_id => new.id})
                    new
                  {:updated, value} -> value
                  x -> x
                end
                {:ok, policy}
              end
          end
        end)
      end)
      # Clean up temporary admin after all operations
      multi |> Ecto.Multi.run({:cleanup_temp_admin, slug}, fn _repo, changes ->
        {temp_admin_subject, temp_admin_actor, temp_admin_actor_email_identity, temp_admin_actor_token} =
          changes[{:temp_admin, slug}]
        cleanup_temp_admin(temp_admin_actor, temp_admin_actor_email_identity, temp_admin_actor_token, temp_admin_subject)
        {:ok, :cleaned}
      end)
    end)
    |> Ecto.Multi.run({:save_state}, fn _repo, _changes ->
      # Save all UUID mappings to disk.
      UuidMapping.save()
      {:ok, :saved}
    end)
    case Repo.transaction(multi) do
      {:ok, _result} ->
        Logger.info("Provisioning completed successfully")
      {:error, step, reason, _changes} ->
        Logger.error("Provisioning failed at step #{inspect(step)}, no changes were applied: #{inspect(reason)}")
    end
  end
end

Provision.provision()

202
fz/modules/relay.nix Normal file
View file

@ -0,0 +1,202 @@
# NixOS module for the Firezone relay (STUN/TURN server). The relay
# registers against the firezone server API using a token and brokers
# traffic between clients and gateways when a direct connection fails.
{
  lib,
  pkgs,
  config,
  ...
}:
let
  inherit (lib)
    boolToString
    getExe
    mkEnableOption
    mkIf
    mkOption
    mkPackageOption
    types
    ;

  cfg = config.services.firezone.relay;
in
{
  options = {
    services.firezone.relay = {
      enable = mkEnableOption "the firezone relay server";
      package = mkPackageOption pkgs "firezone-relay" { };

      name = mkOption {
        type = types.str;
        example = "My relay";
        # Fixed copy-paste from gateway.nix: this module configures a relay.
        description = "The name of this relay as shown in firezone";
      };

      publicIpv4 = mkOption {
        type = types.nullOr types.str;
        default = null;
        description = "The public ipv4 address of this relay";
      };

      publicIpv6 = mkOption {
        type = types.nullOr types.str;
        default = null;
        description = "The public ipv6 address of this relay";
      };

      openFirewall = mkOption {
        type = types.bool;
        default = true;
        description = "Opens up the main STUN port and the TURN allocation range.";
      };

      port = mkOption {
        type = types.port;
        default = 3478;
        description = "The port to listen on for STUN messages";
      };

      lowestPort = mkOption {
        type = types.port;
        default = 49152;
        description = "The lowest port to use in TURN allocation";
      };

      highestPort = mkOption {
        type = types.port;
        default = 65535;
        description = "The highest port to use in TURN allocation";
      };

      apiUrl = mkOption {
        type = types.strMatching "^wss://.+/$";
        example = "wss://firezone.example.com/api/";
        description = ''
          The URL of your firezone server's API. This should be the same
          as your server's setting for {option}`services.firezone.server.settings.api.externalUrl`,
          but with `wss://` instead of `https://`.
        '';
      };

      tokenFile = mkOption {
        type = types.path;
        example = "/run/secrets/firezone-relay-token";
        description = ''
          A file containing the firezone relay token. Do not use a nix-store path here
          as it will make the token publicly readable!

          This file will be passed via systemd credentials, it should only be accessible
          by the root user.
        '';
      };

      logLevel = mkOption {
        type = types.str;
        default = "info";
        description = ''
          The log level for the firezone application. See
          [RUST_LOG](https://docs.rs/env_logger/latest/env_logger/#enabling-logging)
          for the format.
        '';
      };

      enableTelemetry = mkEnableOption "telemetry";
    };
  };

  config = mkIf cfg.enable {
    assertions = [
      {
        # The relay is useless without at least one public address to advertise.
        assertion = cfg.publicIpv4 != null || cfg.publicIpv6 != null;
        message = "At least one of `services.firezone.relay.publicIpv4` and `services.firezone.relay.publicIpv6` must be set";
      }
    ];

    networking.firewall.allowedUDPPorts = mkIf cfg.openFirewall [ cfg.port ];
    networking.firewall.allowedUDPPortRanges = mkIf cfg.openFirewall [
      {
        from = cfg.lowestPort;
        to = cfg.highestPort;
      }
    ];

    systemd.services.firezone-relay = {
      description = "relay service for the Firezone zero-trust access platform";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];

      # util-linux provides `uuidgen` used below.
      path = [ pkgs.util-linux ];
      script = ''
        # If FIREZONE_ID is not given by the user, use a persisted (or newly generated) uuid.
        # The id is stored in the service's StateDirectory so it survives restarts.
        if [[ -z "''${FIREZONE_ID:-}" ]]; then
          if [[ ! -e relay_id ]]; then
            uuidgen -r > relay_id
          fi
          export FIREZONE_ID=$(< relay_id)
        fi

        # The token is only exposed to this service via systemd credentials.
        export FIREZONE_TOKEN=$(< "$CREDENTIALS_DIRECTORY/firezone-token")
        exec ${getExe cfg.package}
      '';

      environment = {
        FIREZONE_API_URL = cfg.apiUrl;
        FIREZONE_NAME = cfg.name;
        FIREZONE_TELEMETRY = boolToString cfg.enableTelemetry;
        # null values are dropped from the systemd environment, so unset
        # addresses simply aren't advertised.
        PUBLIC_IP4_ADDR = cfg.publicIpv4;
        PUBLIC_IP6_ADDR = cfg.publicIpv6;
        LISTEN_PORT = toString cfg.port;
        LOWEST_PORT = toString cfg.lowestPort;
        HIGHEST_PORT = toString cfg.highestPort;
        RUST_LOG = cfg.logLevel;
        LOG_FORMAT = "human";
      };

      serviceConfig = {
        Type = "exec";
        DynamicUser = true;
        User = "firezone-relay";
        LoadCredential = [ "firezone-token:${cfg.tokenFile}" ];
        StateDirectory = "firezone-relay";
        WorkingDirectory = "/var/lib/firezone-relay";
        Restart = "on-failure";
        RestartSec = 10;

        # Hardening
        LockPersonality = true;
        MemoryDenyWriteExecute = true;
        NoNewPrivileges = true;
        PrivateMounts = true;
        PrivateTmp = true;
        PrivateUsers = false;
        ProcSubset = "pid";
        ProtectClock = true;
        ProtectControlGroups = true;
        ProtectHome = true;
        ProtectHostname = true;
        ProtectKernelLogs = true;
        ProtectKernelModules = true;
        ProtectKernelTunables = true;
        ProtectProc = "invisible";
        ProtectSystem = "strict";
        RestrictAddressFamilies = [
          "AF_INET"
          "AF_INET6"
          "AF_NETLINK"
        ];
        RestrictNamespaces = true;
        RestrictRealtime = true;
        RestrictSUIDSGID = true;
        SystemCallArchitectures = "native";
        SystemCallFilter = "@system-service";
        UMask = "077";
      };
    };
  };

  meta.maintainers = with lib.maintainers; [
    oddlama
    patrickdag
  ];
}

1211
fz/modules/server.nix Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,45 @@
# Package for the Firezone gateway: the WireGuard tunnel endpoint that sits
# in front of protected resources and connects back to the firezone server.
{
  lib,
  rustPlatform,
  fetchFromGitHub,
  nix-update-script,
}:
rustPlatform.buildRustPackage rec {
  pname = "firezone-gateway";
  version = "1.4.5";

  src = fetchFromGitHub {
    owner = "firezone";
    repo = "firezone";
    tag = "gateway-${version}";
    # NOTE(review): this src hash is byte-identical to the one used for
    # firezone-headless-client (tag headless-client-1.4.4). Two different
    # tags normally produce different hashes — verify this matches the
    # actual gateway-1.4.5 tag.
    hash = "sha256-2MDQyMCQIqV1Kbem53jnE8DGUZ6SrZqp2LpGJXvLBgA=";
  };

  useFetchCargoVendor = true;
  # NOTE(review): same cargoHash as firezone-gui-client and
  # firezone-headless-client; plausible if all crates share one workspace
  # lockfile, but confirm it vendors correctly for this tag.
  cargoHash = "sha256-Yz9xuH8Eph1pzv0siTpvdSXZLj/AjS5PR06CitK9NdE=";

  # The Rust workspace lives under rust/; only the gateway crate is built.
  sourceRoot = "${src.name}/rust";
  buildAndTestSubdir = "gateway";

  # Presumably switches the build to the system certificate store — TODO confirm
  # against the firezone build scripts.
  RUSTFLAGS = "--cfg system_certs";

  # Required to remove profiling arguments which conflict with this builder
  postPatch = ''
    rm .cargo/config.toml
  '';

  # Let nix-update track new `gateway-*` release tags.
  passthru.updateScript = nix-update-script {
    extraArgs = [
      "--version-regex"
      "gateway-(.*)"
    ];
  };

  meta = {
    description = "WireGuard tunnel server for the Firezone zero-trust access platform";
    homepage = "https://github.com/firezone/firezone";
    license = lib.licenses.asl20;
    maintainers = with lib.maintainers; [
      oddlama
      patrickdag
    ];
    mainProgram = "firezone-gateway";
  };
}

View file

@ -0,0 +1,166 @@
# Package for the Firezone GUI client, a Tauri application. Built in two
# stages: a pnpm/vite frontend derivation, then the Rust/Tauri app which
# links the pre-built frontend in as its `dist` directory.
{
  lib,
  rustPlatform,
  fetchFromGitHub,
  nix-update-script,
  stdenvNoCC,
  pkg-config,
  openssl,
  dbus,
  zenity,
  cargo-tauri,
  gdk-pixbuf,
  glib,
  gobject-introspection,
  gtk3,
  kdePackages,
  libsoup_3,
  libayatana-appindicator,
  webkitgtk_4_1,
  wrapGAppsHook3,
  pnpm_9,
  nodejs,
  makeDesktopItem,
  copyDesktopItems,
}:
let
  version = "1.4.8";

  # Shared between the frontend and the Rust build below.
  src = fetchFromGitHub {
    owner = "firezone";
    repo = "firezone";
    tag = "gui-client-${version}";
    hash = "sha256-YaT/AdnBLDmoda8CGHG/Nc6RFAW8zqr4fOnTdeARlUA=";
  };

  # Stage 1: build the static web assets (tailwind css + vite bundle)
  # that the Tauri webview serves.
  frontend = stdenvNoCC.mkDerivation rec {
    pname = "firezone-gui-client-frontend";
    inherit version src;

    pnpmDeps = pnpm_9.fetchDeps {
      inherit pname version;
      src = "${src}/rust/gui-client";
      hash = "sha256-9ywC920EF6UxkXHs+0WWaU8fr5J35/C+0nNGbSVHESE=";
    };
    pnpmRoot = "rust/gui-client";

    nativeBuildInputs = [
      pnpm_9.configHook
      nodejs
    ];

    buildPhase = ''
      runHook preBuild

      cd $pnpmRoot
      # flowbite is referenced as a plain script from src/, copy it in place.
      cp node_modules/flowbite/dist/flowbite.min.js src/
      pnpm tailwindcss -i src/input.css -o src/output.css
      # Large heap: vite needs more memory than node's default here.
      node --max_old_space_size=1024000 ./node_modules/vite/bin/vite.js build

      runHook postBuild
    '';

    installPhase = ''
      runHook preInstall
      cp -r dist $out
      runHook postInstall
    '';
  };
in
# Stage 2: the Tauri (Rust) application itself.
rustPlatform.buildRustPackage rec {
  pname = "firezone-gui-client";
  inherit version src;

  useFetchCargoVendor = true;
  # NOTE(review): identical cargoHash to firezone-gateway/firezone-headless-client
  # despite a different tag — verify this vendors the gui-client-1.4.8 lockfile.
  cargoHash = "sha256-Yz9xuH8Eph1pzv0siTpvdSXZLj/AjS5PR06CitK9NdE=";

  sourceRoot = "${src.name}/rust";
  buildAndTestSubdir = "gui-client";

  # Presumably enables the system certificate store — TODO confirm.
  RUSTFLAGS = "--cfg system_certs";

  nativeBuildInputs = [
    cargo-tauri.hook
    pkg-config
    wrapGAppsHook3
    copyDesktopItems
  ];

  buildInputs = [
    openssl
    dbus
    gdk-pixbuf
    glib
    gobject-introspection
    gtk3
    libsoup_3
    libayatana-appindicator
    webkitgtk_4_1
  ];

  # Required to remove profiling arguments which conflict with this builder.
  # Also wire the pre-built frontend in where tauri expects its assets.
  postPatch = ''
    rm .cargo/config.toml
    ln -s ${frontend} gui-client/dist
  '';

  # Tries to compile apple specific crates due to workspace dependencies,
  # not sure if this can be worked around
  doCheck = false;

  desktopItems = [
    # Additional desktop item to associate deep-links
    (makeDesktopItem {
      name = "firezone-client-gui-deep-link";
      exec = "firezone-client-gui open-deep-link %U";
      icon = "firezone-client-gui";
      comment = meta.description;
      desktopName = "Firezone GUI Client";
      categories = [ "Network" ];
      noDisplay = true;
      mimeTypes = [
        "x-scheme-handler/firezone-fd0020211111"
      ];
    })
  ];

  preFixup = ''
    gappsWrapperArgs+=(
      # Otherwise blank screen, see https://github.com/tauri-apps/tauri/issues/9304
      --set WEBKIT_DISABLE_DMABUF_RENDERER 1
      # Dialog helpers for GNOME (zenity) and KDE (kdialog) at runtime.
      --prefix PATH ":" ${
        lib.makeBinPath [
          zenity
          kdePackages.kdialog
        ]
      }
      # appindicator is dlopened, so it must be on the library path.
      --prefix LD_LIBRARY_PATH ":" ${
        lib.makeLibraryPath [
          libayatana-appindicator
        ]
      }
    )
  '';

  passthru = {
    # Exposed so the frontend hash can be updated/tested independently.
    inherit frontend;
    updateScript = nix-update-script {
      extraArgs = [
        "--version-regex"
        "gui-client-(.*)"
      ];
    };
  };

  meta = {
    description = "GUI client for the Firezone zero-trust access platform";
    homepage = "https://github.com/firezone/firezone";
    license = lib.licenses.asl20;
    maintainers = with lib.maintainers; [
      oddlama
      patrickdag
    ];
    mainProgram = "firezone-gui-client";
  };
}

View file

@ -0,0 +1,50 @@
# Package for the Firezone headless (CLI) client.
{
  lib,
  rustPlatform,
  fetchFromGitHub,
  nix-update-script,
}:
rustPlatform.buildRustPackage rec {
  pname = "firezone-headless-client";
  version = "1.4.4";

  src = fetchFromGitHub {
    owner = "firezone";
    repo = "firezone";
    tag = "headless-client-${version}";
    # NOTE(review): identical src hash to firezone-gateway (tag
    # gateway-1.4.5). Two different tags should normally hash differently —
    # verify this matches the headless-client-1.4.4 tag.
    hash = "sha256-2MDQyMCQIqV1Kbem53jnE8DGUZ6SrZqp2LpGJXvLBgA=";
  };

  useFetchCargoVendor = true;
  # NOTE(review): cargoHash shared with firezone-gateway/firezone-gui-client;
  # plausible for a single workspace lockfile, but confirm.
  cargoHash = "sha256-Yz9xuH8Eph1pzv0siTpvdSXZLj/AjS5PR06CitK9NdE=";

  # The Rust workspace lives under rust/; build only the headless-client crate.
  sourceRoot = "${src.name}/rust";
  buildAndTestSubdir = "headless-client";

  # Presumably enables the system certificate store — TODO confirm.
  RUSTFLAGS = "--cfg system_certs";

  # Required to remove profiling arguments which conflict with this builder
  postPatch = ''
    rm .cargo/config.toml
  '';

  # Required to run tests
  preCheck = ''
    export XDG_RUNTIME_DIR=$(mktemp -d)
  '';

  # Let nix-update track new `headless-client-*` release tags.
  passthru.updateScript = nix-update-script {
    extraArgs = [
      "--version-regex"
      "headless-client-(.*)"
    ];
  };

  meta = {
    description = "CLI client for the Firezone zero-trust access platform";
    homepage = "https://github.com/firezone/firezone";
    license = lib.licenses.asl20;
    maintainers = with lib.maintainers; [
      oddlama
      patrickdag
    ];
    mainProgram = "firezone-headless-client";
  };
}

View file

@ -0,0 +1,37 @@
# Package for the Firezone relay (STUN/TURN server).
#
# Unlike the gateway/client packages this pins a plain commit instead of a
# release tag (hence the 0-unstable version and the absence of an
# updateScript) — presumably because no relay release tag existed at the
# pinned date; revisit once upstream tags relay releases.
{
  lib,
  rustPlatform,
  fetchFromGitHub,
}:
rustPlatform.buildRustPackage rec {
  pname = "firezone-relay";
  version = "0-unstable-2025-03-15";

  src = fetchFromGitHub {
    owner = "firezone";
    repo = "firezone";
    rev = "09fb5f927410503b0d6e7fc6cf6a2ba06cb5a281";
    hash = "sha256-qDeXAzOeTenL6OIsun/rEfPMo62mQT7RhJEmqemzMsM=";
  };

  useFetchCargoVendor = true;
  cargoHash = "sha256-uqy4GgYaSX2kM4a37093lHmhvOtNUhkEs6/ZS1bjuYo=";

  # The Rust workspace lives under rust/; build only the relay crate.
  sourceRoot = "${src.name}/rust";
  buildAndTestSubdir = "relay";

  # Presumably enables the system certificate store — TODO confirm.
  RUSTFLAGS = "--cfg system_certs";

  # Required to remove profiling arguments which conflict with this builder
  postPatch = ''
    rm .cargo/config.toml
  '';

  meta = {
    description = "STUN/TURN server for the Firezone zero-trust access platform";
    homepage = "https://github.com/firezone/firezone";
    license = lib.licenses.asl20;
    maintainers = with lib.maintainers; [
      oddlama
      patrickdag
    ];
    mainProgram = "firezone-relay";
  };
}

View file

@ -0,0 +1,67 @@
diff --git a/elixir/apps/domain/lib/domain/config/definitions.ex b/elixir/apps/domain/lib/domain/config/definitions.ex
index 8cd2e8d0f..92e18b10b 100644
--- a/elixir/apps/domain/lib/domain/config/definitions.ex
+++ b/elixir/apps/domain/lib/domain/config/definitions.ex
@@ -584,6 +590,7 @@ defmodule Domain.Config.Definitions do
Swoosh.Adapters.Mailgun,
Swoosh.Adapters.Mailjet,
Swoosh.Adapters.Mandrill,
+ Swoosh.Adapters.Mua,
Swoosh.Adapters.Postmark,
Swoosh.Adapters.ProtonBridge,
Swoosh.Adapters.SMTP,
diff --git a/elixir/config/runtime.exs b/elixir/config/runtime.exs
index 15037e0a3..475c4ddfb 100644
--- a/elixir/config/runtime.exs
+++ b/elixir/config/runtime.exs
@@ -226,8 +228,15 @@ if config_env() == :prod do
config :domain,
Domain.Mailer,
[
- adapter: compile_config!(:outbound_email_adapter),
- from_email: compile_config!(:outbound_email_from)
+ adapter: compile_config!(:outbound_email_adapter),
+ from_email: compile_config!(:outbound_email_from),
+ protocol: String.to_atom(System.get_env("OUTBOUND_EMAIL_SMTP_PROTOCOL")),
+ relay: System.get_env("OUTBOUND_EMAIL_SMTP_HOST"),
+ port: String.to_integer(System.get_env("OUTBOUND_EMAIL_SMTP_PORT")),
+ auth: [
+ username: System.get_env("OUTBOUND_EMAIL_SMTP_USERNAME"),
+ password: System.get_env("OUTBOUND_EMAIL_SMTP_PASSWORD")
+ ]
] ++ compile_config!(:outbound_email_adapter_opts)
config :workos, WorkOS.Client,
diff --git a/elixir/mix.exs b/elixir/mix.exs
index 12782d631..dee1245d2 100644
--- a/elixir/mix.exs
+++ b/elixir/mix.exs
@@ -47,7 +47,9 @@ defmodule Firezone.MixProject do
# Formatter doesn't track dependencies of children applications
{:phoenix, "~> 1.7.0"},
{:phoenix_live_view, "~> 1.0.0-rc.0"},
- {:floki, "~> 0.37.0"}
+ {:floki, "~> 0.37.0"},
+ {:mua, "~> 0.2.0"},
+ {:mail, "~> 0.3.0"}
]
end
diff --git a/elixir/mix.lock b/elixir/mix.lock
index 8c4b65959..3d2f9faca 100644
--- a/elixir/mix.lock
+++ b/elixir/mix.lock
@@ -50,11 +50,13 @@
"junit_formatter": {:hex, :junit_formatter, "3.4.0", "d0e8db6c34dab6d3c4154c3b46b21540db1109ae709d6cf99ba7e7a2ce4b1ac2", [:mix], [], "hexpm", "bb36e2ae83f1ced6ab931c4ce51dd3dbef1ef61bb4932412e173b0cfa259dacd"},
"libcluster": {:hex, :libcluster, "3.3.3", "a4f17721a19004cfc4467268e17cff8b1f951befe428975dd4f6f7b84d927fe0", [:mix], [{:jason, "~> 1.1", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "7c0a2275a0bb83c07acd17dab3c3bfb4897b145106750eeccc62d302e3bdfee5"},
"logger_json": {:hex, :logger_json, "6.2.0", "13e2e9f5f7b195865c5c3ef3d296c3ad50e7ecb038d899433702a79e979b91d7", [:mix], [{:ecto, "~> 3.11", [hex: :ecto, repo: "hexpm", optional: true]}, {:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:plug, "~> 1.15", [hex: :plug, repo: "hexpm", optional: true]}, {:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: true]}], "hexpm", "98366d02bedbb56e41b25a6d248d566d4f4bc224bae2b1e982df00ed04ba9219"},
+ "mail": {:hex, :mail, "0.3.1", "cb0a14e4ed8904e4e5a08214e686ccf6f9099346885db17d8c309381f865cc5c", [:mix], [], "hexpm", "1db701e89865c1d5fa296b2b57b1cd587587cca8d8a1a22892b35ef5a8e352a6"},
"metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm", "69b09adddc4f74a40716ae54d140f93beb0fb8978d8636eaded0c31b6f099f16"},
"mime": {:hex, :mime, "2.0.6", "8f18486773d9b15f95f4f4f1e39b710045fa1de891fada4516559967276e4dc2", [:mix], [], "hexpm", "c9945363a6b26d747389aac3643f8e0e09d30499a138ad64fe8fd1d13d9b153e"},
"mimerl": {:hex, :mimerl, "1.3.0", "d0cd9fc04b9061f82490f6581e0128379830e78535e017f7780f37fea7545726", [:rebar3], [], "hexpm", "a1e15a50d1887217de95f0b9b0793e32853f7c258a5cd227650889b38839fe9d"},
"mint": {:hex, :mint, "1.6.2", "af6d97a4051eee4f05b5500671d47c3a67dac7386045d87a904126fd4bbcea2e", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:hpax, "~> 0.1.1 or ~> 0.2.0 or ~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}], "hexpm", "5ee441dffc1892f1ae59127f74afe8fd82fda6587794278d924e4d90ea3d63f9"},
"mix_audit": {:hex, :mix_audit, "2.1.4", "0a23d5b07350cdd69001c13882a4f5fb9f90fbd4cbf2ebc190a2ee0d187ea3e9", [:make, :mix], [{:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:yaml_elixir, "~> 2.11", [hex: :yaml_elixir, repo: "hexpm", optional: false]}], "hexpm", "fd807653cc8c1cada2911129c7eb9e985e3cc76ebf26f4dd628bb25bbcaa7099"},
+ "mua": {:hex, :mua, "0.2.4", "a9172ab0a1ac8732cf2699d739ceac3febcb9b4ffc540260ad2e32c0b6632af9", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}], "hexpm", "e7e4dacd5ad65f13e3542772e74a159c00bd2d5579e729e9bb72d2c73a266fb7"},
"multipart": {:hex, :multipart, "0.4.0", "634880a2148d4555d050963373d0e3bbb44a55b2badd87fa8623166172e9cda0", [:mix], [{:mime, "~> 1.2 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}], "hexpm", "3c5604bc2fb17b3137e5d2abdf5dacc2647e60c5cc6634b102cf1aef75a06f0a"},
"nimble_csv": {:hex, :nimble_csv, "1.2.0", "4e26385d260c61eba9d4412c71cea34421f296d5353f914afe3f2e71cce97722", [:mix], [], "hexpm", "d0628117fcc2148178b034044c55359b26966c6eaa8e2ce15777be3bbc91b12a"},
"nimble_options": {:hex, :nimble_options, "1.1.1", "e3a492d54d85fc3fd7c5baf411d9d2852922f66e69476317787a7b2bb000a61b", [:mix], [], "hexpm", "821b2470ca9442c4b6984882fe9bb0389371b8ddec4d45a9504f00a66f650b44"},

View file

@ -0,0 +1,90 @@
# Package for the Firezone backend server (Elixir/Phoenix umbrella).
# One derivation is instantiated per mix release ("domain", "web", "api")
# via the mixReleaseName argument — see the overlay that calls this file.
{
  lib,
  nixosTests,
  fetchFromGitHub,
  beamPackages,
  gitMinimal,
  pnpm_9,
  nodejs,
  tailwindcss_3,
  esbuild,
  mixReleaseName ? "domain", # "domain" "web" or "api"
}:
beamPackages.mixRelease rec {
  pname = "firezone-server-${mixReleaseName}";
  version = "0-unstable-2025-03-15";

  # Only the elixir/ subtree of the repo is used as the source.
  src = "${
    fetchFromGitHub {
      owner = "firezone";
      repo = "firezone";
      rev = "09fb5f927410503b0d6e7fc6cf6a2ba06cb5a281";
      hash = "sha256-1CZBFhOwX0DfXykPQ9tzn4tHg2tSnByXEPtlZleHK5k=";
      # This is necessary to allow sending mails via SMTP, as the default
      # SMTP adapter is current broken: https://github.com/swoosh/swoosh/issues/785
      # NOTE: applying the patch in postFetch means the hash above covers
      # the patched tree — changing the patch requires a new hash.
      postFetch = ''
        ${lib.getExe gitMinimal} -C $out apply ${./0000-add-mua.patch}
      '';
    }
  }/elixir";

  # Frontend (pnpm) dependencies for the web app assets.
  pnpmDeps = pnpm_9.fetchDeps {
    inherit pname version;
    src = "${src}/apps/web/assets";
    hash = "sha256-ejyBppFtKeyVhAWmssglbpLleOnbw9d4B+iM5Vtx47A=";
  };
  pnpmRoot = "apps/web/assets";

  # Point tailwind/esbuild at the nix-provided binaries instead of the
  # mix-managed downloads, and make tzdata's data dir configurable at runtime.
  preBuild = ''
    cat >> config/config.exs <<EOF
    config :tailwind, path: "${lib.getExe tailwindcss_3}"
    config :esbuild, path: "${lib.getExe esbuild}"
    EOF

    cat >> config/runtime.exs <<EOF
    config :tzdata, :data_dir, System.get_env("TZDATA_DIR")
    EOF
  '';

  postBuild = ''
    pushd apps/web

    # for external task you need a workaround for the no deps check flag
    # https://github.com/phoenixframework/phoenix/issues/2690
    mix do deps.loadpaths --no-deps-check, assets.deploy
    mix do deps.loadpaths --no-deps-check, phx.digest priv/static

    popd
  '';

  nativeBuildInputs = [
    pnpm_9
    pnpm_9.configHook
    nodejs
  ];

  inherit mixReleaseName;

  # Fixed-output derivation for the mix (hex) dependencies.
  mixFodDeps = beamPackages.fetchMixDeps {
    pname = "mix-deps-${pname}-${version}";
    inherit src version;
    hash = "sha256-2Y9u5+o8+RG+c8Z6V7Vex5K1odI7a/WYj5fC0xWbVRo=";
  };

  passthru.tests = {
    inherit (nixosTests) firezone;
  };

  meta = {
    description = "Backend server for the Firezone zero-trust access platform";
    homepage = "https://github.com/firezone/firezone";
    license = lib.licenses.elastic20;
    maintainers = with lib.maintainers; [
      oddlama
      patrickdag
    ];
    mainProgram = mixReleaseName;
    platforms = lib.platforms.linux;
  };
}

160
hosts/sentinel/firezone.nix Normal file
View file

@ -0,0 +1,160 @@
# Host configuration: runs the Firezone server on this machine, provisions
# the "Home" account declaratively (resources + policies for internal
# services), and exposes web/API endpoints through nginx.
{
  config,
  globals,
  lib,
  ...
}:
let
  firezoneDomain = "firezone.${globals.domains.me}";

  # Internal service domains exposed as DNS resources through the "home"
  # gateway group.
  homeDomains = [
    globals.services.grafana.domain
    globals.services.immich.domain
    globals.services.influxdb.domain
    globals.services.loki.domain
    globals.services.paperless.domain
    globals.services.esphome.domain
    globals.services.home-assistant.domain
    "fritzbox.${globals.domains.personal}"
  ];

  # Helper producing a single policy attrset keyed "<group>@<resource>"
  # that grants <group> access to <resource>.
  allow = group: resource: {
    "${group}@${resource}" = {
      inherit group resource;
      description = "Allow ${group} access to ${resource}";
    };
  };
in
{
  # Generated SMTP password, readable by the firezone group.
  age.secrets.firezone-smtp-password = {
    generator.script = "alnum";
    mode = "440";
    group = "firezone";
  };

  # Keep firezone state (DB etc.) across reboots on the impermanent root.
  environment.persistence."/persist".directories = [
    {
      directory = "/var/lib/private/firezone";
      mode = "0700";
    }
  ];

  globals.services.firezone.domain = firezoneDomain;
  globals.monitoring.http.firezone = {
    url = "https://${firezoneDomain}/";
    network = "internet";
    expectedBodyRegex = "Welcome to Firezone";
  };

  services.firezone.server = {
    enable = true;
    enableLocalDB = true;

    # Mail settings; host/username/from come from host-local secrets.
    # Port 465 with implicit TLS (SMTPS).
    smtp = {
      inherit (config.repo.secrets.local.firezone.mail) from host username;
      port = 465;
      implicitTls = true;
      passwordFile = config.age.secrets.firezone-smtp-password.file;
    };

    provision = {
      enable = true;
      accounts.main = {
        name = "Home";
        relayGroups.relays.name = "Relays";
        gatewayGroups.home.name = "Home";
        actors.admin = {
          type = "account_admin_user";
          name = "Admin";
          email = "admin@${globals.domains.me}";
        };

        # FIXME: dont hardcode, filter global service domains by internal state
        # FIXME: new entry here? make new adguardhome entry too.
        resources =
          # One DNS resource per internal domain, allowing ping, http(s)
          # and QUIC (udp/443).
          lib.genAttrs homeDomains (domain: {
            type = "dns";
            name = domain;
            address = domain;
            gatewayGroups = [ "home" ];
            filters = [
              { protocol = "icmp"; }
              {
                protocol = "tcp";
                ports = [
                  443
                  80
                ];
              }
              {
                protocol = "udp";
                ports = [ 443 ];
              }
            ];
          })
          # Plus the whole services VLAN as CIDR resources (v4 + v6),
          # with no port filters.
          // {
            "home.vlan-services.v4" = {
              type = "cidr";
              name = "home.vlan-services.v4";
              address = globals.net.home-lan.vlans.services.cidrv4;
              gatewayGroups = [ "home" ];
            };
            "home.vlan-services.v6" = {
              type = "cidr";
              name = "home.vlan-services.v6";
              address = globals.net.home-lan.vlans.services.cidrv6;
              gatewayGroups = [ "home" ];
            };
          };

        # Grant the builtin "everyone" group access to all resources above.
        policies =
          { }
          // allow "everyone" "home.vlan-services.v4"
          // allow "everyone" "home.vlan-services.v6"
          // lib.genAttrs homeDomains (domain: allow "everyone" domain);
      };
    };

    api.externalUrl = "https://${firezoneDomain}/api/";
    web.externalUrl = "https://${firezoneDomain}/";
  };

  services.nginx = {
    # Web UI upstream.
    upstreams.firezone = {
      servers."127.0.0.1:${toString config.services.firezone.server.web.port}" = { };
      extraConfig = ''
        zone firezone 64k;
        keepalive 2;
      '';
      monitoring = {
        enable = true;
        expectedBodyRegex = "Welcome to Firezone";
      };
    };
    # API upstream; a bare GET on the API root returns 404 by design,
    # which is what the monitoring check expects.
    upstreams.firezone-api = {
      servers."127.0.0.1:${toString config.services.firezone.server.api.port}" = { };
      extraConfig = ''
        zone firezone 64k;
        keepalive 2;
      '';
      monitoring = {
        enable = true;
        expectedStatus = 404;
        expectedBodyRegex = ''{"error":{"reason":"Not Found"}}'';
      };
    };
    virtualHosts.${firezoneDomain} = {
      forceSSL = true;
      useACMEWildcardHost = true;
      locations."/" = {
        # The trailing slash is important to strip the location prefix from the request
        proxyPass = "http://firezone/";
        proxyWebsockets = true;
      };
      locations."/api/" = {
        # The trailing slash is important to strip the location prefix from the request
        proxyPass = "http://firezone-api/";
        proxyWebsockets = true;
      };
    };
  };
}

Binary file not shown.

View file

@ -109,6 +109,7 @@ in
})
[
# FIXME: dont hardcode, filter global service domains by internal state
# FIXME: new entry here? make new firezone entry too.
globals.services.grafana.domain
globals.services.immich.domain
globals.services.influxdb.domain

View file

@ -11,5 +11,6 @@
./promtail.nix
./secrets.nix
./telegraf.nix
../fz/modules
];
}

View file

@ -20,6 +20,23 @@ _inputs: [
# })
# ];
firezone-server-api = prev.callPackage ../fz/pkgs/firezone-server/package.nix {
mixReleaseName = "api";
};
firezone-server-domain = prev.callPackage ../fz/pkgs/firezone-server/package.nix {
mixReleaseName = "domain";
};
firezone-server-web = prev.callPackage ../fz/pkgs/firezone-server/package.nix {
mixReleaseName = "web";
};
firezone-gateway = prev.callPackage ../fz/pkgs/firezone-gateway/package.nix { };
firezone-relay = prev.callPackage ../fz/pkgs/firezone-relay/package.nix { };
firezone-gui-client = prev.callPackage ../fz/pkgs/firezone-gui-client/package.nix { };
firezone-headless-client = prev.callPackage ../fz/pkgs/firezone-headless-client/package.nix { };
mdns-repeater = prev.callPackage ./mdns-repeater.nix { };
formats = prev.formats // {