diff --git a/fz/modules/default.nix b/fz/modules/default.nix new file mode 100644 index 0000000..e24aef5 --- /dev/null +++ b/fz/modules/default.nix @@ -0,0 +1,9 @@ +{ + imports = [ + ./gateway.nix + ./gui-client.nix + ./headless-client.nix + ./relay.nix + ./server.nix + ]; +} diff --git a/fz/modules/gateway.nix b/fz/modules/gateway.nix new file mode 100644 index 0000000..9154780 --- /dev/null +++ b/fz/modules/gateway.nix @@ -0,0 +1,159 @@ +{ + lib, + pkgs, + config, + ... +}: +let + inherit (lib) + boolToString + getExe + mkEnableOption + mkIf + mkOption + mkPackageOption + types + ; + + cfg = config.services.firezone.gateway; +in +{ + options = { + services.firezone.gateway = { + enable = mkOption { + default = false; + example = true; + description = '' + Whether to enable the firezone gateway. + + You have to manually masquerade and forward traffic from the + tun-firezone interface to your resource! Refer to the + [upstream setup script](https://github.com/firezone/firezone/blob/8c7c0a9e8e33ae790aeb75fdb5a15432c2870b79/scripts/gateway-systemd-install.sh#L154-L168) + for a list of iptable commands. + + See the firezone nixos test in this repository for an nftables based example. + ''; + type = lib.types.bool; + }; + package = mkPackageOption pkgs "firezone-gateway" { }; + + name = mkOption { + type = types.str; + description = "The name of this gateway as shown in firezone"; + }; + + apiUrl = mkOption { + type = types.strMatching "^wss://.+/$"; + example = "wss://firezone.example.com/api/"; + description = '' + The URL of your firezone server's API. This should be the same + as your server's setting for {option}`services.firezone.server.settings.api.externalUrl`, + but with `wss://` instead of `https://`. + ''; + }; + + tokenFile = mkOption { + type = types.path; + example = "/run/secrets/firezone-gateway-token"; + description = '' + A file containing the firezone gateway token. Do not use a nix-store path here + as it will make the token publicly readable! + + This file will be passed via systemd credentials, it should only be accessible + by the root user. + ''; + }; + + logLevel = mkOption { + type = types.str; + default = "info"; + description = '' + The log level for the firezone application. See + [RUST_LOG](https://docs.rs/env_logger/latest/env_logger/#enabling-logging) + for the format. + ''; + }; + + enableTelemetry = mkEnableOption "telemetry"; + }; + }; + + config = mkIf cfg.enable { + systemd.services.firezone-gateway = { + description = "Gateway service for the Firezone zero-trust access platform"; + after = [ "network.target" ]; + wantedBy = [ "multi-user.target" ]; + + path = [ pkgs.util-linux ]; + script = '' + # If FIREZONE_ID is not given by the user, use a persisted (or newly generated) uuid. + if [[ -z "''${FIREZONE_ID:-}" ]]; then + if [[ ! 
-e gateway_id ]]; then + uuidgen -r > gateway_id + fi + export FIREZONE_ID=$(< gateway_id) + fi + + export FIREZONE_TOKEN=$(< "$CREDENTIALS_DIRECTORY/firezone-token") + exec ${getExe cfg.package} + ''; + + environment = { + FIREZONE_API_URL = cfg.apiUrl; + FIREZONE_NAME = cfg.name; + FIREZONE_NO_TELEMETRY = boolToString (!cfg.enableTelemetry); + RUST_LOG = cfg.logLevel; + }; + + serviceConfig = { + Type = "exec"; + DynamicUser = true; + User = "firezone-gateway"; + LoadCredential = [ "firezone-token:${cfg.tokenFile}" ]; + + DeviceAllow = "/dev/net/tun"; + AmbientCapabilities = [ "CAP_NET_ADMIN" ]; + CapabilityBoundingSet = [ "CAP_NET_ADMIN" ]; + + StateDirectory = "firezone-gateway"; + WorkingDirectory = "/var/lib/firezone-gateway"; + + Restart = "on-failure"; + RestartSec = 10; + + LockPersonality = true; + MemoryDenyWriteExecute = true; + NoNewPrivileges = true; + PrivateMounts = true; + PrivateTmp = true; + PrivateUsers = false; + ProcSubset = "pid"; + ProtectClock = true; + ProtectControlGroups = true; + ProtectHome = true; + ProtectHostname = true; + ProtectKernelLogs = true; + ProtectKernelModules = true; + ProtectKernelTunables = true; + ProtectProc = "invisible"; + ProtectSystem = "strict"; + RestrictAddressFamilies = [ + "AF_INET" + "AF_INET6" + "AF_NETLINK" + ]; + RestrictNamespaces = true; + RestrictRealtime = true; + RestrictSUIDSGID = true; + SystemCallArchitectures = "native"; + SystemCallFilter = "@system-service"; + UMask = "077"; + }; + }; + }; + + meta.maintainers = with lib.maintainers; [ + oddlama + patrickdag + ]; +} diff --git a/fz/modules/gui-client.nix b/fz/modules/gui-client.nix new file mode 100644 index 0000000..b46a7e0 --- /dev/null +++ b/fz/modules/gui-client.nix @@ -0,0 +1,142 @@ +{ + lib, + pkgs, + config, + ... +}: +let + inherit (lib) + boolToString + getExe' + mkEnableOption + mkIf + mkOption + mkPackageOption + types + ; + + cfg = config.services.firezone.gui-client; +in +{ + options = { + services.firezone.gui-client = { + enable = mkEnableOption "the firezone gui client"; + package = mkPackageOption pkgs "firezone-gui-client" { }; + + allowedUsers = mkOption { + type = types.listOf types.str; + default = [ ]; + description = '' + All listed users will become part of the `firezone-client` group so + they can control the IPC service. This is a convenience option. + ''; + }; + + name = mkOption { + type = types.str; + description = "The name of this client as shown in firezone"; + }; + + logLevel = mkOption { + type = types.str; + default = "info"; + description = '' + The log level for the firezone application. See + [RUST_LOG](https://docs.rs/env_logger/latest/env_logger/#enabling-logging) + for the format. + ''; + }; + + enableTelemetry = mkEnableOption "telemetry"; + }; + }; + + config = mkIf cfg.enable { + users.groups.firezone-client.members = cfg.allowedUsers; + + # Required for deep-link mimetype registration + environment.systemPackages = [ cfg.package ]; + + # Required for the token store in the gui application + services.gnome.gnome-keyring.enable = true; + + systemd.services.firezone-ipc-service = { + description = "GUI IPC service for the Firezone zero-trust access platform"; + after = [ "network.target" ]; + wantedBy = [ "multi-user.target" ]; + + path = [ pkgs.util-linux ]; + script = '' + # If FIREZONE_ID is not given by the user, use a persisted (or newly generated) uuid. + if [[ -z "''${FIREZONE_ID:-}" ]]; then + if [[ ! 
-e client_id ]]; then + uuidgen -r > client_id + fi + export FIREZONE_ID=$(< client_id) + fi + + # run-debug logs to stdout instead of file, which is preferrable + exec ${getExe' cfg.package "firezone-client-ipc"} run-debug + ''; + + environment = { + FIREZONE_NAME = cfg.name; + FIREZONE_NO_TELEMETRY = boolToString (!cfg.enableTelemetry); + LOG_DIR = "%L/dev.firezone.client"; + RUST_LOG = cfg.logLevel; + }; + + serviceConfig = { + Type = "notify"; + + DeviceAllow = "/dev/net/tun"; + AmbientCapabilities = [ "CAP_NET_ADMIN" ]; + CapabilityBoundingSet = [ "CAP_NET_ADMIN" ]; + + # This block contains hardcoded values in the client, we cannot change these :( + Group = "firezone-client"; + RuntimeDirectory = "dev.firezone.client"; + StateDirectory = "dev.firezone.client"; + WorkingDirectory = "/var/lib/dev.firezone.client"; + LogsDirectory = "dev.firezone.client"; + + Restart = "on-failure"; + RestartSec = 10; + + LockPersonality = true; + MemoryDenyWriteExecute = true; + NoNewPrivileges = true; + PrivateMounts = true; + PrivateTmp = true; + PrivateUsers = false; + ProcSubset = "pid"; + ProtectClock = true; + ProtectControlGroups = true; + ProtectHome = true; + ProtectHostname = true; + ProtectKernelLogs = true; + ProtectKernelModules = true; + ProtectKernelTunables = true; + ProtectProc = "invisible"; + ProtectSystem = "strict"; + RestrictAddressFamilies = [ + "AF_INET" + "AF_INET6" + "AF_NETLINK" + "AF_UNIX" + ]; + RestrictNamespaces = true; + RestrictRealtime = true; + RestrictSUIDSGID = true; + SystemCallArchitectures = "native"; + SystemCallFilter = "@system-service"; + UMask = "077"; + }; + }; + }; + + meta.maintainers = with lib.maintainers; [ + oddlama + patrickdag + ]; +} diff --git a/fz/modules/headless-client.nix b/fz/modules/headless-client.nix new file mode 100644 index 0000000..c369fe5 --- /dev/null +++ b/fz/modules/headless-client.nix @@ -0,0 +1,148 @@ +{ + lib, + pkgs, + config, + ... +}: +let + inherit (lib) + boolToString + getExe + mkEnableOption + mkIf + mkOption + mkPackageOption + types + ; + + cfg = config.services.firezone.headless-client; +in +{ + options = { + services.firezone.headless-client = { + enable = mkEnableOption "the firezone headless client"; + package = mkPackageOption pkgs "firezone-headless-client" { }; + + name = mkOption { + type = types.str; + description = "The name of this client as shown in firezone"; + }; + + apiUrl = mkOption { + type = types.strMatching "^wss://.+/$"; + example = "wss://firezone.example.com/api/"; + description = '' + The URL of your firezone server's API. This should be the same + as your server's setting for {option}`services.firezone.server.settings.api.externalUrl`, + but with `wss://` instead of `https://`. + ''; + }; + + tokenFile = mkOption { + type = types.path; + example = "/run/secrets/firezone-client-token"; + description = '' + A file containing the firezone client token. Do not use a nix-store path here + as it will make the token publicly readable! + + This file will be passed via systemd credentials, it should only be accessible + by the root user. + ''; + }; + + logLevel = mkOption { + type = types.str; + default = "info"; + description = '' + The log level for the firezone application. See + [RUST_LOG](https://docs.rs/env_logger/latest/env_logger/#enabling-logging) + for the format. 
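+          For example, a directive such as `info,firezone_headless_client=debug`
+          (the crate name here is only illustrative) keeps the default level at
+          `info` while enabling verbose output for a single module.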
+ ''; + }; + + enableTelemetry = mkEnableOption "telemetry"; + }; + }; + + config = mkIf cfg.enable { + systemd.services.firezone-headless-client = { + description = "headless client service for the Firezone zero-trust access platform"; + after = [ "network.target" ]; + wantedBy = [ "multi-user.target" ]; + + path = [ pkgs.util-linux ]; + script = '' + # If FIREZONE_ID is not given by the user, use a persisted (or newly generated) uuid. + if [[ -z "''${FIREZONE_ID:-}" ]]; then + if [[ ! -e client_id ]]; then + uuidgen -r > client_id + fi + export FIREZONE_ID=$(< client_id) + fi + + exec ${getExe cfg.package} + ''; + + environment = { + FIREZONE_API_URL = cfg.apiUrl; + FIREZONE_NAME = cfg.name; + FIREZONE_NO_TELEMETRY = boolToString (!cfg.enableTelemetry); + FIREZONE_TOKEN_PATH = "%d/firezone-token"; + LOG_DIR = "%L/dev.firezone.client"; + RUST_LOG = cfg.logLevel; + }; + + serviceConfig = { + Type = "exec"; + LoadCredential = [ "firezone-token:${cfg.tokenFile}" ]; + + DeviceAllow = "/dev/net/tun"; + AmbientCapabilities = [ "CAP_NET_ADMIN" ]; + CapabilityBoundingSet = [ "CAP_NET_ADMIN" ]; + + # Hardcoded values in the client :( + RuntimeDirectory = "dev.firezone.client"; + StateDirectory = "dev.firezone.client"; + WorkingDirectory = "/var/lib/dev.firezone.client"; + LogsDirectory = "dev.firezone.client"; + + Restart = "on-failure"; + RestartSec = 10; + + LockPersonality = true; + MemoryDenyWriteExecute = true; + NoNewPrivileges = true; + PrivateMounts = true; + PrivateTmp = true; + PrivateUsers = false; + ProcSubset = "pid"; + ProtectClock = true; + ProtectControlGroups = true; + ProtectHome = true; + ProtectHostname = true; + ProtectKernelLogs = true; + ProtectKernelModules = true; + ProtectKernelTunables = true; + ProtectProc = "invisible"; + ProtectSystem = "strict"; + RestrictAddressFamilies = [ + "AF_INET" + "AF_INET6" + "AF_NETLINK" + "AF_UNIX" + ]; + RestrictNamespaces = true; + RestrictRealtime = true; + RestrictSUIDSGID = true; + SystemCallArchitectures = "native"; + SystemCallFilter = "@system-service"; + UMask = "077"; + }; + }; + }; + + meta.maintainers = with lib.maintainers; [ + oddlama + patrickdag + ]; +} diff --git a/fz/modules/provision.exs b/fz/modules/provision.exs new file mode 100644 index 0000000..14909ec --- /dev/null +++ b/fz/modules/provision.exs @@ -0,0 +1,709 @@ +defmodule Provision do + alias Domain.{Repo, Accounts, Auth, Actors, Resources, Tokens, Gateways, Relays, Policies} + require Logger + + # UUID Mapping handling + defmodule UuidMapping do + @mapping_file "provision-uuids.json" + + # Loads the mapping from file + def load do + mappings = case File.read(@mapping_file) do + {:ok, content} -> + case Jason.decode(content) do + {:ok, mapping} -> mapping + _ -> %{"accounts" => %{}} + end + + _ -> %{"accounts" => %{}} + end + Process.put(:uuid_mappings, mappings) + mappings + end + + # Saves the current mapping (defaulting to the one in the process dictionary) + def save(mapping \\ Process.get(:uuid_mappings)) do + File.write!(@mapping_file, Jason.encode!(mapping)) + end + + # Retrieves the account-level mapping from a given mapping (or from Process) + def get_account(mapping \\ Process.get(:uuid_mappings), account_slug) do + get_in(mapping, ["accounts", account_slug]) || %{} + end + + # Retrieves the entity mapping for a specific account and type + def get_entities(mapping \\ Process.get(:uuid_mappings), account_slug, type) do + get_in(mapping, ["accounts", account_slug, type]) || %{} + end + + # Retrieves an entity mapping for a specific account, type and 
external_id + def get_entity(mapping \\ Process.get(:uuid_mappings), account_slug, type, external_id) do + get_in(mapping, ["accounts", account_slug, type, external_id]) + end + + # Updates (or creates) the account UUID mapping and stores it in the process dictionary. + def update_account(account_slug, uuid) do + mapping = Process.get(:uuid_mappings) || load() + mapping = ensure_account_exists(mapping, account_slug) + mapping = put_in(mapping, ["accounts", account_slug, "id"], uuid) + Process.put(:uuid_mappings, mapping) + mapping + end + + # Ensures that the given account exists in the mapping. + def ensure_account_exists(mapping, account_slug) do + if not Map.has_key?(mapping["accounts"], account_slug) do + put_in(mapping, ["accounts", account_slug], %{}) + else + mapping + end + end + + # Updates (or creates) the mapping for entities of a given type for the account. + def update_entities(account_slug, type, new_entries) do + mapping = Process.get(:uuid_mappings) || load() + mapping = ensure_account_exists(mapping, account_slug) + current = get_entities(mapping, account_slug, type) + mapping = put_in(mapping, ["accounts", account_slug, type], Map.merge(current, new_entries)) + Process.put(:uuid_mappings, mapping) + mapping + end + + # Removes an entire account from the mapping. + def remove_account(account_slug) do + mapping = Process.get(:uuid_mappings) || load() + mapping = update_in(mapping, ["accounts"], fn accounts -> + Map.delete(accounts, account_slug) + end) + Process.put(:uuid_mappings, mapping) + mapping + end + + # Removes a specific entity mapping for the account. + def remove_entity(account_slug, type, key) do + mapping = Process.get(:uuid_mappings) || load() + mapping = update_in(mapping, ["accounts", account_slug, type], fn entities -> + Map.delete(entities || %{}, key) + end) + Process.put(:uuid_mappings, mapping) + mapping + end + end + + defp resolve_references(value) when is_map(value) do + Enum.into(value, %{}, fn {k, v} -> {k, resolve_references(v)} end) + end + + defp resolve_references(value) when is_list(value) do + Enum.map(value, &resolve_references/1) + end + + defp resolve_references(value) when is_binary(value) do + Regex.replace(~r/\{env:([^}]+)\}/, value, fn _, var -> + System.get_env(var) || raise "Environment variable #{var} not set" + end) + end + + defp resolve_references(value), do: value + + defp atomize_keys(map) when is_map(map) do + Enum.into(map, %{}, fn {k, v} -> + { + if(is_binary(k), do: String.to_atom(k), else: k), + if(is_map(v), do: atomize_keys(v), else: v) + } + end) + end + + defp cleanup_account(uuid) do + case Accounts.fetch_account_by_id_or_slug(uuid) do + {:ok, value} when value.deleted_at == nil -> + Logger.info("Deleting removed account #{value.slug}") + value |> Ecto.Changeset.change(%{ deleted_at: DateTime.utc_now() }) |> Repo.update!() + _ -> :ok + end + end + + defp cleanup_actor(uuid, subject) do + case Actors.fetch_actor_by_id(uuid, subject) do + {:ok, value} -> + Logger.info("Deleting removed actor #{value.name}") + {:ok, _} = Actors.delete_actor(value, subject) + _ -> :ok + end + end + + defp cleanup_provider(uuid, subject) do + case Auth.fetch_provider_by_id(uuid, subject) do + {:ok, value} -> + Logger.info("Deleting removed provider #{value.name}") + {:ok, _} = Auth.delete_provider(value, subject) + _ -> :ok + end + end + + defp cleanup_gateway_group(uuid, subject) do + case Gateways.fetch_group_by_id(uuid, subject) do + {:ok, value} -> + Logger.info("Deleting removed gateway group #{value.name}") + {:ok, _} = 
Gateways.delete_group(value, subject) + _ -> :ok + end + end + + defp cleanup_relay_group(uuid, subject) do + case Relays.fetch_group_by_id(uuid, subject) do + {:ok, value} -> + Logger.info("Deleting removed relay group #{value.name}") + {:ok, _} = Relays.delete_group(value, subject) + _ -> :ok + end + end + + defp cleanup_actor_group(uuid, subject) do + case Actors.fetch_group_by_id(uuid, subject) do + {:ok, value} -> + Logger.info("Deleting removed actor group #{value.name}") + {:ok, _} = Actors.delete_group(value, subject) + _ -> :ok + end + end + + # Fetch resource by uuid, but follow the chain of replacements if any + defp fetch_resource(uuid, subject) do + case Resources.fetch_resource_by_id(uuid, subject) do + {:ok, resource} when resource.replaced_by_resource_id != nil -> fetch_resource(resource.replaced_by_resource_id, subject) + v -> v + end + end + + defp cleanup_resource(uuid, subject) do + case fetch_resource(uuid, subject) do + {:ok, value} when value.deleted_at == nil -> + Logger.info("Deleting removed resource #{value.name}") + {:ok, _} = Resources.delete_resource(value, subject) + _ -> :ok + end + end + + # Fetch policy by uuid, but follow the chain of replacements if any + defp fetch_policy(uuid, subject) do + case Policies.fetch_policy_by_id(uuid, subject) do + {:ok, policy} when policy.replaced_by_policy_id != nil -> fetch_policy(policy.replaced_by_policy_id, subject) + v -> v + end + end + + defp cleanup_policy(uuid, subject) do + case fetch_policy(uuid, subject) do + {:ok, value} when value.deleted_at == nil -> + Logger.info("Deleting removed policy #{value.description}") + {:ok, _} = Policies.delete_policy(value, subject) + _ -> :ok + end + end + + defp cleanup_entity_type(account_slug, entity_type, cleanup_fn, temp_admin_subject) do + # Get mapping for this entity type + existing_entities = UuidMapping.get_entities(account_slug, entity_type) + # Get current entities from account data + current_entities = Process.get(:current_entities) + # Determine which ones to remove + removed_entity_ids = Map.keys(existing_entities) -- (current_entities[entity_type] || []) + + # Process each entity to remove + Enum.each(removed_entity_ids, fn entity_id -> + case existing_entities[entity_id] do + nil -> :ok + uuid -> + cleanup_fn.(uuid, temp_admin_subject) + UuidMapping.remove_entity(account_slug, entity_type, entity_id) + end + end) + end + + defp collect_current_entities(account_data) do + %{ + "actors" => Map.keys(account_data["actors"] || %{}), + "providers" => Map.keys(account_data["auth"] || %{}), + "gateway_groups" => Map.keys(account_data["gatewayGroups"] || %{}), + "relay_groups" => Map.keys(account_data["relayGroups"] || %{}), + "actor_groups" => Map.keys(account_data["groups"] || %{}) ++ ["everyone"], + "resources" => Map.keys(account_data["resources"] || %{}), + "policies" => Map.keys(account_data["policies"] || %{}) + } + end + + defp nil_if_deleted_or_not_found(value) do + case value do + nil -> nil + {:error, :not_found} -> nil + {:ok, value} when value.deleted_at != nil -> nil + v -> v + end + end + + defp create_temp_admin(account, email_provider) do + temp_admin_actor_email = "firezone-provision@localhost.local" + temp_admin_actor_context = %Auth.Context{ + type: :browser, + user_agent: "Unspecified/0.0", + remote_ip: {127, 0, 0, 1}, + remote_ip_location_region: "N/A", + remote_ip_location_city: "N/A", + remote_ip_location_lat: 0.0, + remote_ip_location_lon: 0.0 + } + + {:ok, temp_admin_actor} = + Actors.create_actor(account, %{ + type: :account_admin_user, + 
name: "Provisioning" + }) + + {:ok, temp_admin_actor_email_identity} = + Auth.create_identity(temp_admin_actor, email_provider, %{ + provider_identifier: temp_admin_actor_email, + provider_identifier_confirmation: temp_admin_actor_email + }) + + {:ok, temp_admin_actor_token} = + Auth.create_token(temp_admin_actor_email_identity, temp_admin_actor_context, "temporarynonce", DateTime.utc_now() |> DateTime.add(1, :hour)) + + {:ok, temp_admin_subject} = + Auth.build_subject(temp_admin_actor_token, temp_admin_actor_context) + + {temp_admin_subject, temp_admin_actor, temp_admin_actor_email_identity, temp_admin_actor_token} + end + + defp cleanup_temp_admin(temp_admin_actor, temp_admin_actor_email_identity, temp_admin_actor_token, subject) do + Logger.info("Cleaning up temporary admin actor") + {:ok, _} = Tokens.delete_token(temp_admin_actor_token, subject) + {:ok, _} = Auth.delete_identity(temp_admin_actor_email_identity, subject) + {:ok, _} = Actors.delete_actor(temp_admin_actor, subject) + end + + def provision() do + Logger.info("Starting provisioning") + + # Load desired state + json_file = "provision-state.json" + {:ok, raw_json} = File.read(json_file) + {:ok, %{"accounts" => accounts}} = Jason.decode(raw_json) + accounts = resolve_references(accounts) + + # Load existing UUID mappings into the process dictionary. + UuidMapping.load() + + # Clean up removed accounts first + current_account_slugs = Map.keys(accounts) + existing_accounts = Map.keys(Process.get(:uuid_mappings)["accounts"]) + removed_accounts = existing_accounts -- current_account_slugs + + Enum.each(removed_accounts, fn slug -> + if uuid = get_in(Process.get(:uuid_mappings), ["accounts", slug, "id"]) do + cleanup_account(uuid) + # Remove the account from the UUID mapping. + UuidMapping.remove_account(slug) + end + end) + + multi = Enum.reduce(accounts, Ecto.Multi.new(), fn {slug, account_data}, multi -> + account_attrs = atomize_keys(%{ + name: account_data["name"], + slug: slug, + features: Map.get(account_data, "features", %{}), + metadata: Map.get(account_data, "metadata", %{}), + limits: Map.get(account_data, "limits", %{}) + }) + + multi = multi + |> Ecto.Multi.run({:account, slug}, fn repo, _changes -> + case Accounts.fetch_account_by_id_or_slug(slug) do + {:ok, acc} -> + Logger.info("Updating existing account #{slug}") + updated_acc = acc |> Ecto.Changeset.change(account_attrs) |> repo.update!() + {:ok, {:existing, updated_acc}} + _ -> + Logger.info("Creating new account #{slug}") + {:ok, account} = Accounts.create_account(account_attrs) + + Logger.info("Creating internet gateway group") + {:ok, internet_site} = Gateways.create_internet_group(account) + + Logger.info("Creating internet resource") + {:ok, _internet_resource} = Resources.create_internet_resource(account, internet_site) + + # Store mapping of slug to UUID + UuidMapping.update_account(slug, account.id) + {:ok, {:new, account}} + end + end) + |> Ecto.Multi.run({:everyone_group, slug}, fn _repo, changes -> + case Map.get(changes, {:account, slug}) do + {:new, account} -> + Logger.info("Creating everyone group for new account") + {:ok, actor_group} = Actors.create_managed_group(account, %{name: "Everyone", membership_rules: [%{operator: true}]}) + UuidMapping.update_entities(slug, "actor_groups", %{"everyone" => actor_group.id}) + {:ok, actor_group} + {:existing, _account} -> + {:ok, :skipped} + end + end) + |> Ecto.Multi.run({:email_provider, slug}, fn _repo, changes -> + case Map.get(changes, {:account, slug}) do + {:new, account} -> + Logger.info("Creating 
default email provider for new account") + Auth.create_provider(account, %{name: "Email", adapter: :email, adapter_config: %{}}) + {:existing, account} -> + Auth.Provider.Query.not_disabled() + |> Auth.Provider.Query.by_adapter(:email) + |> Auth.Provider.Query.by_account_id(account.id) + |> Repo.fetch(Auth.Provider.Query, []) + end + end) + |> Ecto.Multi.run({:temp_admin, slug}, fn _repo, changes -> + {_, account} = changes[{:account, slug}] + email_provider = changes[{:email_provider, slug}] + {:ok, create_temp_admin(account, email_provider)} + end) + + # Clean up removed entities for this account after we have an admin subject + multi = multi + |> Ecto.Multi.run({:cleanup_entities, slug}, fn _repo, changes -> + {temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}] + + # Store current entities in process dictionary for our helper function + current_entities = collect_current_entities(account_data) + Process.put(:current_entities, current_entities) + + # Define entity types and their cleanup functions + entity_types = [ + {"actors", &cleanup_actor/2}, + {"providers", &cleanup_provider/2}, + {"gateway_groups", &cleanup_gateway_group/2}, + {"relay_groups", &cleanup_relay_group/2}, + {"actor_groups", &cleanup_actor_group/2}, + {"resources", &cleanup_resource/2}, + {"policies", &cleanup_policy/2} + ] + + # Clean up each entity type + Enum.each(entity_types, fn {entity_type, cleanup_fn} -> + cleanup_entity_type(slug, entity_type, cleanup_fn, temp_admin_subject) + end) + + {:ok, :cleaned} + end) + + # Create or update actors + multi = Enum.reduce(account_data["actors"] || %{}, multi, fn {external_id, actor_data}, multi -> + actor_attrs = atomize_keys(%{ + name: actor_data["name"], + type: String.to_atom(actor_data["type"]) + }) + + Ecto.Multi.run(multi, {:actor, slug, external_id}, fn _repo, changes -> + {_, account} = changes[{:account, slug}] + {temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}] + uuid = UuidMapping.get_entity(slug, "actors", external_id) + case uuid && Actors.fetch_actor_by_id(uuid, temp_admin_subject) |> nil_if_deleted_or_not_found() do + nil -> + Logger.info("Creating new actor #{actor_data["name"]}") + {:ok, actor} = Actors.create_actor(account, actor_attrs) + # Update the mapping without manually handling Process.get/put. 
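+              # The mapping ties the stable attribute keys from provision-state.json
+              # to the database UUIDs, so later runs can detect entries that were
+              # renamed or removed from the desired state.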
+ UuidMapping.update_entities(slug, "actors", %{external_id => actor.id}) + {:ok, {:new, actor}} + {:ok, existing_actor} -> + Logger.info("Updating existing actor #{actor_data["name"]}") + {:ok, updated_act} = Actors.update_actor(existing_actor, actor_attrs, temp_admin_subject) + {:ok, {:existing, updated_act}} + end + end) + |> Ecto.Multi.run({:actor_identity, slug, external_id}, fn repo, changes -> + email_provider = changes[{:email_provider, slug}] + case Map.get(changes, {:actor, slug, external_id}) do + {:new, actor} -> + Logger.info("Creating actor email identity") + Auth.create_identity(actor, email_provider, %{ + provider_identifier: actor_data["email"], + provider_identifier_confirmation: actor_data["email"] + }) + {:existing, actor} -> + Logger.info("Updating actor email identity") + {:ok, identity} = Auth.Identity.Query.not_deleted() + |> Auth.Identity.Query.by_actor_id(actor.id) + |> Auth.Identity.Query.by_provider_id(email_provider.id) + |> Repo.fetch(Auth.Identity.Query, []) + + {:ok, identity |> Ecto.Changeset.change(%{ + provider_identifier: actor_data["email"] + }) |> repo.update!()} + end + end) + end) + + # Create or update providers + multi = Enum.reduce(account_data["auth"] || %{}, multi, fn {external_id, provider_data}, multi -> + Ecto.Multi.run(multi, {:provider, slug, external_id}, fn repo, changes -> + provider_attrs = %{ + name: provider_data["name"], + adapter: String.to_atom(provider_data["adapter"]), + adapter_config: provider_data["adapter_config"] + } + + {_, account} = changes[{:account, slug}] + {temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}] + uuid = UuidMapping.get_entity(slug, "providers", external_id) + case uuid && Auth.fetch_provider_by_id(uuid, temp_admin_subject) |> nil_if_deleted_or_not_found() do + nil -> + Logger.info("Creating new provider #{provider_data["name"]}") + {:ok, provider} = Auth.create_provider(account, provider_attrs) + UuidMapping.update_entities(slug, "providers", %{external_id => provider.id}) + {:ok, provider} + {:ok, existing} -> + Logger.info("Updating existing provider #{provider_data["name"]}") + {:ok, existing |> Ecto.Changeset.change(provider_attrs) |> repo.update!()} + end + end) + end) + + # Create or update gateway_groups + multi = Enum.reduce(account_data["gatewayGroups"] || %{}, multi, fn {external_id, gateway_group_data}, multi -> + Ecto.Multi.run(multi, {:gateway_group, slug, external_id}, fn _repo, changes -> + gateway_group_attrs = %{ + name: gateway_group_data["name"], + tokens: [%{}] + } + + {_, account} = changes[{:account, slug}] + {temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}] + uuid = UuidMapping.get_entity(slug, "gateway_groups", external_id) + case uuid && Gateways.fetch_group_by_id(uuid, temp_admin_subject) |> nil_if_deleted_or_not_found() do + nil -> + Logger.info("Creating new gateway group #{gateway_group_data["name"]}") + gateway_group = account + |> Gateways.Group.Changeset.create(gateway_group_attrs, temp_admin_subject) + |> Repo.insert!() + UuidMapping.update_entities(slug, "gateway_groups", %{external_id => gateway_group.id}) + {:ok, gateway_group} + {:ok, existing} -> + # Nothing to update + {:ok, existing} + end + end) + end) + + # Create or update relay_groups + multi = Enum.reduce(account_data["relayGroups"] || %{}, multi, fn {external_id, relay_group_data}, multi -> + Ecto.Multi.run(multi, {:relay_group, slug, external_id}, fn _repo, changes -> + relay_group_attrs = %{ + name: relay_group_data["name"] + } + + {temp_admin_subject, _, _, _} = changes[{:temp_admin, 
slug}] + uuid = UuidMapping.get_entity(slug, "relay_groups", external_id) + existing_relay_group = uuid && Relays.fetch_group_by_id(uuid, temp_admin_subject) + case existing_relay_group do + v when v in [nil, {:error, :not_found}] -> + Logger.info("Creating new relay group #{relay_group_data["name"]}") + {:ok, relay_group} = Relays.create_group(relay_group_attrs, temp_admin_subject) + UuidMapping.update_entities(slug, "relay_groups", %{external_id => relay_group.id}) + {:ok, relay_group} + {:ok, existing} -> + # Nothing to update + {:ok, existing} + end + end) + end) + + # Create or update actor_groups + multi = Enum.reduce(account_data["groups"] || %{}, multi, fn {external_id, actor_group_data}, multi -> + Ecto.Multi.run(multi, {:actor_group, slug, external_id}, fn _repo, changes -> + actor_group_attrs = %{ + name: actor_group_data["name"], + type: :static + } + + {temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}] + uuid = UuidMapping.get_entity(slug, "actor_groups", external_id) + case uuid && Actors.fetch_group_by_id(uuid, temp_admin_subject) |> nil_if_deleted_or_not_found() do + nil -> + Logger.info("Creating new actor group #{actor_group_data["name"]}") + {:ok, actor_group} = Actors.create_group(actor_group_attrs, temp_admin_subject) + UuidMapping.update_entities(slug, "actor_groups", %{external_id => actor_group.id}) + {:ok, actor_group} + {:ok, existing} -> + # Nothing to update + {:ok, existing} + end + end) + |> Ecto.Multi.run({:actor_group_members, slug, external_id}, fn repo, changes -> + {_, account} = changes[{:account, slug}] + group_uuid = UuidMapping.get_entity(slug, "actor_groups", external_id) + + memberships = + Actors.Membership.Query.all() + |> Actors.Membership.Query.by_group_id(group_uuid) + |> Actors.Membership.Query.returning_all() + |> Repo.all() + + existing_members = Enum.map(memberships, fn membership -> membership.actor_id end) + desired_members = Enum.map(actor_group_data["members"] || [], fn member -> + uuid = UuidMapping.get_entity(slug, "actors", member) + if uuid == nil do + raise "Cannot find provisioned actor #{member} to add to group" + end + uuid + end) + + missing_members = desired_members -- existing_members + untracked_members = existing_members -- desired_members + + Logger.info("Updating members for actor group #{external_id}") + Enum.each(missing_members || [], fn actor_uuid -> + Logger.info("Adding member #{external_id}") + Actors.Membership.Changeset.upsert(account.id, %Actors.Membership{}, %{ + group_id: group_uuid, + actor_id: actor_uuid + }) + |> repo.insert!() + end) + + if actor_group_data["forceMembers"] == true do + # Remove untracked members + to_delete = Enum.map(untracked_members, fn actor_uuid -> {group_uuid, actor_uuid} end) + if to_delete != [] do + Actors.Membership.Query.by_group_id_and_actor_id({:in, to_delete}) + |> repo.delete_all() + end + end + + {:ok, nil} + end) + end) + + # Create or update resources + multi = Enum.reduce(account_data["resources"] || %{}, multi, fn {external_id, resource_data}, multi -> + Ecto.Multi.run(multi, {:resource, slug, external_id}, fn _repo, changes -> + resource_attrs = %{ + type: String.to_atom(resource_data["type"]), + name: resource_data["name"], + address: resource_data["address"], + address_description: resource_data["address_description"], + connections: Enum.map(resource_data["gatewayGroups"] || [], fn group -> + %{gateway_group_id: UuidMapping.get_entity(slug, "gateway_groups", group)} + end), + filters: Enum.map(resource_data["filters"] || [], fn filter -> + %{ + ports: 
filter["ports"] || [], + protocol: String.to_atom(filter["protocol"]) + } + end) + } + + {temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}] + uuid = UuidMapping.get_entity(slug, "resources", external_id) + case uuid && fetch_resource(uuid, temp_admin_subject) |> nil_if_deleted_or_not_found() do + nil -> + Logger.info("Creating new resource #{resource_data["name"]}") + {:ok, resource} = Resources.create_resource(resource_attrs, temp_admin_subject) + UuidMapping.update_entities(slug, "resources", %{external_id => resource.id}) + {:ok, resource} + {:ok, existing} -> + existing = Repo.preload(existing, :connections) + Logger.info("Updating existing resource #{resource_data["name"]}") + only_updated_attrs = resource_attrs + |> Enum.reject(fn {key, value} -> + case key do + # Compare connections by gateway_group_id only + :connections -> value == Enum.map(existing.connections || [], fn conn -> Map.take(conn, [:gateway_group_id]) end) + # Compare filters by ports and protocol only + :filters -> value == Enum.map(existing.filters || [], fn filter -> Map.take(filter, [:ports, :protocol]) end) + _ -> Map.get(existing, key) == value + end + end) + |> Enum.into(%{}) + + if only_updated_attrs == %{} do + {:ok, existing} + else + resource = case existing |> Resources.update_or_replace_resource(resource_attrs, temp_admin_subject) do + {:replaced, _old, new} -> + UuidMapping.update_entities(slug, "resources", %{external_id => new.id}) + new + {:updated, value} -> value + x -> x + end + + {:ok, resource} + end + end + end) + end) + + # Create or update policies + multi = Enum.reduce(account_data["policies"] || %{}, multi, fn {external_id, policy_data}, multi -> + Ecto.Multi.run(multi, {:policy, slug, external_id}, fn _repo, changes -> + policy_attrs = %{ + description: policy_data["description"], + actor_group_id: UuidMapping.get_entity(slug, "actor_groups", policy_data["group"]), + resource_id: UuidMapping.get_entity(slug, "resources", policy_data["resource"]) + } + + {temp_admin_subject, _, _, _} = changes[{:temp_admin, slug}] + uuid = UuidMapping.get_entity(slug, "policies", external_id) + case uuid && fetch_policy(uuid, temp_admin_subject) |> nil_if_deleted_or_not_found() do + nil -> + Logger.info("Creating new policy #{policy_data["name"]}") + {:ok, policy} = Policies.create_policy(policy_attrs, temp_admin_subject) + UuidMapping.update_entities(slug, "policies", %{external_id => policy.id}) + {:ok, policy} + {:ok, existing} -> + Logger.info("Updating existing policy #{policy_data["name"]}") + only_updated_attrs = policy_attrs + |> Enum.reject(fn {key, value} -> Map.get(existing, key) == value end) + |> Enum.into(%{}) + + if only_updated_attrs == %{} do + {:ok, existing} + else + policy = case existing |> Policies.update_or_replace_policy(policy_attrs, temp_admin_subject) do + {:replaced, _old, new} -> + UuidMapping.update_entities(slug, "policies", %{external_id => new.id}) + new + {:updated, value} -> value + x -> x + end + + {:ok, policy} + end + end + end) + end) + + # Clean up temporary admin after all operations + multi |> Ecto.Multi.run({:cleanup_temp_admin, slug}, fn _repo, changes -> + {temp_admin_subject, temp_admin_actor, temp_admin_actor_email_identity, temp_admin_actor_token} = + changes[{:temp_admin, slug}] + + cleanup_temp_admin(temp_admin_actor, temp_admin_actor_email_identity, temp_admin_actor_token, temp_admin_subject) + {:ok, :cleaned} + end) + end) + |> Ecto.Multi.run({:save_state}, fn _repo, _changes -> + # Save all UUID mappings to disk. 
+ UuidMapping.save() + {:ok, :saved} + end) + + case Repo.transaction(multi) do + {:ok, _result} -> + Logger.info("Provisioning completed successfully") + {:error, step, reason, _changes} -> + Logger.error("Provisioning failed at step #{inspect(step)}, no changes were applied: #{inspect(reason)}") + end + end +end + +Provision.provision() diff --git a/fz/modules/relay.nix b/fz/modules/relay.nix new file mode 100644 index 0000000..b5bc86a --- /dev/null +++ b/fz/modules/relay.nix @@ -0,0 +1,202 @@ +{ + lib, + pkgs, + config, + ... +}: +let + inherit (lib) + boolToString + getExe + mkEnableOption + mkIf + mkOption + mkPackageOption + types + ; + + cfg = config.services.firezone.relay; +in +{ + options = { + services.firezone.relay = { + enable = mkEnableOption "the firezone relay server"; + package = mkPackageOption pkgs "firezone-relay" { }; + + name = mkOption { + type = types.str; + example = "My relay"; + description = "The name of this relay as shown in firezone"; + }; + + publicIpv4 = mkOption { + type = types.nullOr types.str; + default = null; + description = "The public IPv4 address of this relay"; + }; + + publicIpv6 = mkOption { + type = types.nullOr types.str; + default = null; + description = "The public IPv6 address of this relay"; + }; + + openFirewall = mkOption { + type = types.bool; + default = true; + description = "Opens up the main STUN port and the TURN allocation range."; + }; + + port = mkOption { + type = types.port; + default = 3478; + description = "The port to listen on for STUN messages"; + }; + + lowestPort = mkOption { + type = types.port; + default = 49152; + description = "The lowest port to use in TURN allocation"; + }; + + highestPort = mkOption { + type = types.port; + default = 65535; + description = "The highest port to use in TURN allocation"; + }; + + apiUrl = mkOption { + type = types.strMatching "^wss://.+/$"; + example = "wss://firezone.example.com/api/"; + description = '' + The URL of your firezone server's API. This should be the same + as your server's setting for {option}`services.firezone.server.settings.api.externalUrl`, + but with `wss://` instead of `https://`. + ''; + }; + + tokenFile = mkOption { + type = types.path; + example = "/run/secrets/firezone-relay-token"; + description = '' + A file containing the firezone relay token. Do not use a nix-store path here + as it will make the token publicly readable! + + This file will be passed via systemd credentials, it should only be accessible + by the root user. + ''; + }; + + logLevel = mkOption { + type = types.str; + default = "info"; + description = '' + The log level for the firezone application. See + [RUST_LOG](https://docs.rs/env_logger/latest/env_logger/#enabling-logging) + for the format.
+ ''; + }; + + enableTelemetry = mkEnableOption "telemetry"; + }; + }; + + config = mkIf cfg.enable { + assertions = [ + { + assertion = cfg.publicIpv4 != null || cfg.publicIpv6 != null; + message = "At least one of `services.firezone.relay.publicIpv4` and `services.firezone.relay.publicIpv6` must be set"; + } + ]; + + networking.firewall.allowedUDPPorts = mkIf cfg.openFirewall [ cfg.port ]; + networking.firewall.allowedUDPPortRanges = mkIf cfg.openFirewall [ + { + from = cfg.lowestPort; + to = cfg.highestPort; + } + ]; + + systemd.services.firezone-relay = { + description = "relay service for the Firezone zero-trust access platform"; + after = [ "network.target" ]; + wantedBy = [ "multi-user.target" ]; + + path = [ pkgs.util-linux ]; + script = '' + # If FIREZONE_ID is not given by the user, use a persisted (or newly generated) uuid. + if [[ -z "''${FIREZONE_ID:-}" ]]; then + if [[ ! -e relay_id ]]; then + uuidgen -r > relay_id + fi + export FIREZONE_ID=$(< relay_id) + fi + + export FIREZONE_TOKEN=$(< "$CREDENTIALS_DIRECTORY/firezone-token") + exec ${getExe cfg.package} + ''; + + environment = { + FIREZONE_API_URL = cfg.apiUrl; + FIREZONE_NAME = cfg.name; + FIREZONE_TELEMETRY = boolToString cfg.enableTelemetry; + + PUBLIC_IP4_ADDR = cfg.publicIpv4; + PUBLIC_IP6_ADDR = cfg.publicIpv6; + + LISTEN_PORT = toString cfg.port; + LOWEST_PORT = toString cfg.lowestPort; + HIGHEST_PORT = toString cfg.highestPort; + + RUST_LOG = cfg.logLevel; + LOG_FORMAT = "human"; + }; + + serviceConfig = { + Type = "exec"; + DynamicUser = true; + User = "firezone-relay"; + LoadCredential = [ "firezone-token:${cfg.tokenFile}" ]; + + StateDirectory = "firezone-relay"; + WorkingDirectory = "/var/lib/firezone-relay"; + + Restart = "on-failure"; + RestartSec = 10; + + LockPersonality = true; + MemoryDenyWriteExecute = true; + NoNewPrivileges = true; + PrivateMounts = true; + PrivateTmp = true; + PrivateUsers = false; + ProcSubset = "pid"; + ProtectClock = true; + ProtectControlGroups = true; + ProtectHome = true; + ProtectHostname = true; + ProtectKernelLogs = true; + ProtectKernelModules = true; + ProtectKernelTunables = true; + ProtectProc = "invisible"; + ProtectSystem = "strict"; + RestrictAddressFamilies = [ + "AF_INET" + "AF_INET6" + "AF_NETLINK" + ]; + RestrictNamespaces = true; + RestrictRealtime = true; + RestrictSUIDSGID = true; + SystemCallArchitectures = "native"; + SystemCallFilter = "@system-service"; + UMask = "077"; + }; + }; + }; + + meta.maintainers = with lib.maintainers; [ + oddlama + patrickdag + ]; +} diff --git a/fz/modules/server.nix b/fz/modules/server.nix new file mode 100644 index 0000000..5d5210a --- /dev/null +++ b/fz/modules/server.nix @@ -0,0 +1,1211 @@ +{ + lib, + pkgs, + config, + ... 
+}: +let + inherit (lib) + attrNames + boolToString + concatLines + concatLists + concatMapAttrs + concatStringsSep + filterAttrs + filterAttrsRecursive + flip + forEach + getExe + isBool + mapAttrs + mapAttrsToList + mkDefault + mkEnableOption + mkIf + mkMerge + mkOption + mkPackageOption + optionalAttrs + optionalString + recursiveUpdate + subtractLists + toUpper + types + ; + + cfg = config.services.firezone.server; + jsonFormat = pkgs.formats.json { }; + availableAuthAdapters = [ + "email" + "openid_connect" + "userpass" + "token" + "google_workspace" + "microsoft_entra" + "okta" + "jumpcloud" + ]; + + typePortRange = + types.coercedTo types.port + (x: { + from = x; + to = x; + }) + ( + types.submodule { + options = { + from = mkOption { + type = types.port; + description = "The start of the port range, inclusive."; + }; + + to = mkOption { + type = types.port; + description = "The end of the port range, inclusive."; + }; + }; + } + ); + + # All non-secret environment variables or the given component + collectEnvironment = + component: + mapAttrs (_: v: if isBool v then boolToString v else toString v) ( + cfg.settings // cfg.${component}.settings + ); + + # All mandatory secrets which were not explicitly provided by the user will + # have to be generated, if they do not yet exist. + generateSecrets = + let + requiredSecrets = filterAttrs (_: v: v == null) cfg.settingsSecret; + in + '' + mkdir -p secrets + chmod 700 secrets + '' + + concatLines ( + forEach (attrNames requiredSecrets) (secret: '' + if [[ ! -e secrets/${secret} ]]; then + echo "Generating ${secret}" + # Some secrets like TOKENS_KEY_BASE require a value >=64 bytes. + head -c 64 /dev/urandom | base64 -w 0 > secrets/${secret} + chmod 600 secrets/${secret} + fi + '') + ); + + # All secrets given in `cfg.settingsSecret` must be loaded from a file and + # exported into the environment. Also exclude any variables that were + # overwritten by the local component settings. 
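+  # For a secret generated on the fly this expands to a line like
+  #   export SECRET_KEY_BASE=$(< secrets/SECRET_KEY_BASE)
+  # whereas user-provided secrets are read from the systemd credentials
+  # directory instead.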
+ loadSecretEnvironment = + component: + let + relevantSecrets = subtractLists (attrNames cfg.${component}.settings) ( + attrNames cfg.settingsSecret + ); + in + concatLines ( + forEach relevantSecrets ( + secret: + ''export ${secret}=$(< ${ + if cfg.settingsSecret.${secret} == null then + "secrets/${secret}" + else + "\"$CREDENTIALS_DIRECTORY/${secret}\"" + })'' + ) + ); + + provisionStateJson = + let + # Convert clientSecretFile options into the real counterpart + augmentedAccounts = flip mapAttrs cfg.provision.accounts ( + accountName: account: + account + // { + auth = flip mapAttrs account.auth ( + authName: auth: + recursiveUpdate auth ( + optionalAttrs (auth.adapter_config.clientSecretFile != null) { + adapter_config.client_secret = "{env:AUTH_CLIENT_SECRET_${toUpper accountName}_${toUpper authName}}"; + } + ) + ); + } + ); + in + jsonFormat.generate "provision-state.json" { + # Do not include any clientSecretFile attributes in the resulting json + accounts = filterAttrsRecursive (k: _: k != "clientSecretFile") augmentedAccounts; + }; + + commonServiceConfig = { + AmbientCapablities = [ ]; + CapabilityBoundingSet = [ ]; + LockPersonality = true; + MemoryDenyWriteExecute = true; + NoNewPrivileges = true; + PrivateMounts = true; + PrivateTmp = true; + PrivateUsers = false; + ProcSubset = "pid"; + ProtectClock = true; + ProtectControlGroups = true; + ProtectHome = true; + ProtectHostname = true; + ProtectKernelLogs = true; + ProtectKernelModules = true; + ProtectKernelTunables = true; + ProtectProc = "invisible"; + ProtectSystem = "strict"; + RestrictAddressFamilies = [ + "AF_INET" + "AF_INET6" + "AF_NETLINK" + "AF_UNIX" + ]; + RestrictNamespaces = true; + RestrictRealtime = true; + RestrictSUIDSGID = true; + SystemCallArchitectures = "native"; + SystemCallFilter = "@system-service"; + UMask = "077"; + + DynamicUser = true; + User = "firezone"; + + Slice = "system-firezone.slice"; + StateDirectory = "firezone"; + WorkingDirectory = "/var/lib/firezone"; + + LoadCredential = mapAttrsToList (secretName: secretFile: "${secretName}:${secretFile}") ( + filterAttrs (_: v: v != null) cfg.settingsSecret + ); + Type = "exec"; + Restart = "on-failure"; + RestartSec = 10; + }; + + componentOptions = component: { + enable = mkEnableOption "the Firezone ${component} server"; + package = mkPackageOption pkgs "firezone-server-${component}" { }; + + settings = mkOption { + description = '' + Environment variables for this component of the Firezone server. For a + list of available variables, please refer to the [upstream definitions](https://github.com/firezone/firezone/blob/main/elixir/apps/domain/lib/domain/config/definitions.ex). + Some variables like `OUTBOUND_EMAIL_ADAPTER_OPTS` require json values + for which you can use `VAR = builtins.toJSON { /* ... */ }`. + + This component will automatically inherit all variables defined via + {option}`services.firezone.server.settings` and + {option}`services.firezone.server.settingsSecret`, but which can be + overwritten by this option. 
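+          For example, `services.firezone.server.web.settings.LOG_LEVEL = "debug";`
+          (the variable name is only an illustration) would raise the log level of
+          the web component alone, taking precedence over any globally defined value.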
+ ''; + default = { }; + type = types.submodule { + freeformType = types.attrsOf ( + types.oneOf [ + types.bool + types.float + types.int + types.str + types.path + types.package + ] + ); + }; + }; + }; +in +{ + options.services.firezone.server = { + enable = mkEnableOption "all Firezone components"; + enableLocalDB = mkEnableOption "a local postgresql database for Firezone"; + nginx.enable = mkEnableOption "nginx virtualhost definition"; + + openClusterFirewall = mkOption { + type = types.bool; + default = false; + description = '' + Opens up the erlang distribution port of all enabled components to + allow reaching the server cluster from the internet. You only need to + set this if you are actually distributing your cluster across multiple + machines. + ''; + }; + + clusterHosts = mkOption { + type = types.listOf types.str; + default = [ + "api@localhost.localdomain" + "web@localhost.localdomain" + "domain@localhost.localdomain" + ]; + description = '' + A list of components and their hosts that are part of this cluster. For + a single-machine setup, the default value will be sufficient. This + value will automatically set `ERLANG_CLUSTER_ADAPTER_CONFIG`. + + The format is `@`. + ''; + }; + + settingsSecret = mkOption { + default = { }; + description = '' + This is a convenience option which allows you to set secret values for + environment variables by specifying a file which will contain the value + at runtime. Before starting the server, the content of each file will + be loaded into the respective environment variable. + + Otherwise, this option is equivalent to + {option}`services.firezone.server.settings`. Refer to the settings + option for more information regarding the actual variables and how + filtering rules are applied for each component. + ''; + type = types.submodule { + freeformType = types.attrsOf types.path; + options = { + RELEASE_COOKIE = mkOption { + type = types.nullOr types.path; + default = null; + description = '' + A file containing a unique secret identifier for the Erlang + cluster. All Firezone components in your cluster must use the + same value. + + If this is `null`, a shared value will automatically be generated + on startup and used for all components on this machine. You do + not need to set this except when you spread your cluster over + multiple hosts. + ''; + }; + + TOKENS_KEY_BASE = mkOption { + type = types.nullOr types.path; + default = null; + description = '' + A file containing a unique base64 encoded secret for the + `TOKENS_KEY_BASE`. All Firezone components in your cluster must + use the same value. + + If this is `null`, a shared value will automatically be generated + on startup and used for all components on this machine. You do + not need to set this except when you spread your cluster over + multiple hosts. + ''; + }; + + SECRET_KEY_BASE = mkOption { + type = types.nullOr types.path; + default = null; + description = '' + A file containing a unique base64 encoded secret for the + `SECRET_KEY_BASE`. All Firezone components in your cluster must + use the same value. + + If this is `null`, a shared value will automatically be generated + on startup and used for all components on this machine. You do + not need to set this except when you spread your cluster over + multiple hosts. + ''; + }; + + TOKENS_SALT = mkOption { + type = types.nullOr types.path; + default = null; + description = '' + A file containing a unique base64 encoded secret for the + `TOKENS_SALT`. All Firezone components in your cluster must + use the same value. 
+ + If this is `null`, a shared value will automatically be generated + on startup and used for all components on this machine. You do + not need to set this except when you spread your cluster over + multiple hosts. + ''; + }; + + LIVE_VIEW_SIGNING_SALT = mkOption { + type = types.nullOr types.path; + default = null; + description = '' + A file containing a unique base64 encoded secret for the + `LIVE_VIEW_SIGNING_SALT`. All Firezone components in your cluster must + use the same value. + + If this is `null`, a shared value will automatically be generated + on startup and used for all components on this machine. You do + not need to set this except when you spread your cluster over + multiple hosts. + ''; + }; + + COOKIE_SIGNING_SALT = mkOption { + type = types.nullOr types.path; + default = null; + description = '' + A file containing a unique base64 encoded secret for the + `COOKIE_SIGNING_SALT`. All Firezone components in your cluster must + use the same value. + + If this is `null`, a shared value will automatically be generated + on startup and used for all components on this machine. You do + not need to set this except when you spread your cluster over + multiple hosts. + ''; + }; + + COOKIE_ENCRYPTION_SALT = mkOption { + type = types.nullOr types.path; + default = null; + description = '' + A file containing a unique base64 encoded secret for the + `COOKIE_ENCRYPTION_SALT`. All Firezone components in your cluster must + use the same value. + + If this is `null`, a shared value will automatically be generated + on startup and used for all components on this machine. You do + not need to set this except when you spread your cluster over + multiple hosts. + ''; + }; + }; + }; + }; + + settings = mkOption { + description = '' + Environment variables for the Firezone server. For a list of available + variables, please refer to the [upstream definitions](https://github.com/firezone/firezone/blob/main/elixir/apps/domain/lib/domain/config/definitions.ex). + Some variables like `OUTBOUND_EMAIL_ADAPTER_OPTS` require json values + for which you can use `VAR = builtins.toJSON { /* ... */ }`. + + Each component has an additional `settings` option which allows you to + override specific variables passed to that component. + ''; + default = { }; + type = types.submodule { + freeformType = types.attrsOf ( + types.oneOf [ + types.bool + types.float + types.int + types.str + types.path + types.package + ] + ); + }; + }; + + smtp = { + configureManually = mkOption { + type = types.bool; + default = false; + description = '' + Outbound email configuration is mandatory for Firezone and supports + many different delivery adapters. Yet, most users will only need an + SMTP relay to send emails, so this configuration enforced by default. + + If you want to utilize an alternative way to send emails (e.g. via a + supportd API-based service), enable this option and define + `OUTBOUND_EMAIL_FROM`, `OUTBOUND_EMAIL_ADAPTER` and + `OUTBOUND_EMAIL_ADAPTER_OPTS` manually via + {option}`services.firezone.server.settings` and/or + {option}`services.firezone.server.settingsSecret`. + + The Firezone documentation holds [a list of supported Swoosh adapters](https://github.com/firezone/firezone/blob/main/website/src/app/docs/reference/env-vars/readme.mdx#outbound-emails). 
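+          As a rough sketch (the adapter module name and its options are only
+          illustrative, consult the linked list for the values of your provider):
+
+          ```nix
+          services.firezone.server.settings = {
+            OUTBOUND_EMAIL_FROM = "firezone@example.com";
+            OUTBOUND_EMAIL_ADAPTER = "Elixir.Swoosh.Adapters.Mailgun";
+            OUTBOUND_EMAIL_ADAPTER_OPTS = builtins.toJSON {
+              api_key = "insert-api-key-here";
+              domain = "example.com";
+            };
+          };
+          ```
+
+          If the adapter options contain credentials, the whole
+          `OUTBOUND_EMAIL_ADAPTER_OPTS` value can instead be supplied as a file via
+          {option}`services.firezone.server.settingsSecret`.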
+ ''; + }; + + from = mkOption { + type = types.str; + example = "firezone@example.com"; + description = "Outbound SMTP FROM address"; + }; + + host = mkOption { + type = types.str; + example = "mail.example.com"; + description = "Outbound SMTP host"; + }; + + port = mkOption { + type = types.port; + example = 465; + description = "Outbound SMTP port"; + }; + + implicitTls = mkOption { + type = types.bool; + default = false; + description = "Whether to use implicit TLS instead of STARTTLS (usually port 465)"; + }; + + username = mkOption { + type = types.str; + example = "firezone@example.com"; + description = "Username to authenticate against the SMTP relay"; + }; + + passwordFile = mkOption { + type = types.path; + example = "/run/secrets/smtp-password"; + description = "File containing the password for the given username. Beware that a file in the nix store will be world readable."; + }; + }; + + domain = componentOptions "domain"; + + web = componentOptions "web" // { + externalUrl = mkOption { + type = types.strMatching "^https://.+/$"; + example = "https://firezone.example.com/"; + description = '' + The external URL under which you will serve the web interface. You + need to setup a reverse proxy for TLS termination, either with + {option}`services.firezone.server.nginx.enable` or manually. + ''; + }; + + address = mkOption { + type = types.str; + default = "127.0.0.1"; + description = "The address to listen on"; + }; + + port = mkOption { + type = types.port; + default = 8080; + description = "The port under which the web interface will be served locally"; + }; + + trustedProxies = mkOption { + type = types.listOf types.str; + default = [ ]; + description = "A list of trusted proxies"; + }; + }; + + api = componentOptions "api" // { + externalUrl = mkOption { + type = types.strMatching "^https://.+/$"; + example = "https://firezone.example.com/api/"; + description = '' + The external URL under which you will serve the api. You need to + setup a reverse proxy for TLS termination, either with + {option}`services.firezone.server.nginx.enable` or manually. 
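+          Gateways, relays and clients connect to this API over WebSockets; their
+          respective `apiUrl` options expect the same URL with the `wss://` scheme,
+          i.e. `wss://firezone.example.com/api/` for the example above.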
+ ''; + }; + + address = mkOption { + type = types.str; + default = "127.0.0.1"; + description = "The address to listen on"; + }; + + port = mkOption { + type = types.port; + default = 8081; + description = "The port under which the api will be served locally"; + }; + + trustedProxies = mkOption { + type = types.listOf types.str; + default = [ ]; + description = "A list of trusted proxies"; + }; + }; + + provision = { + enable = mkEnableOption "provisioning of the Firezone domain server"; + accounts = mkOption { + type = types.attrsOf ( + types.submodule { + freeformType = jsonFormat.type; + options = { + name = mkOption { + type = types.str; + description = "The account name"; + example = "My Organization"; + }; + + features = + let + mkFeatureOption = + name: default: + mkOption { + type = types.bool; + inherit default; + description = "Whether to enable the `${name}` feature for this account."; + }; + in + { + flow_activities = mkFeatureOption "flow_activities" true; + policy_conditions = mkFeatureOption "policy_conditions" true; + multi_site_resources = mkFeatureOption "multi_site_resources" true; + traffic_filters = mkFeatureOption "traffic_filters" true; + self_hosted_relays = mkFeatureOption "self_hosted_relays" true; + idp_sync = mkFeatureOption "idp_sync" true; + rest_api = mkFeatureOption "rest_api" true; + internet_resource = mkFeatureOption "internet_resource" true; + }; + + actors = mkOption { + type = types.attrsOf ( + types.submodule { + options = { + type = mkOption { + type = types.enum [ + "account_admin_user" + "account_user" + "service_account" + "api_client" + ]; + description = "The actor type"; + }; + + name = mkOption { + type = types.str; + description = "The name of this actor"; + }; + + email = mkOption { + type = types.str; + description = "The email address used to authenticate as this actor"; + }; + }; + } + ); + default = { }; + example = { + admin = { + type = "account_admin_user"; + name = "Admin"; + email = "admin@myorg.example.com"; + }; + }; + description = '' + All actors (users) to provision. The attribute name will only + be used to track the actor and does not have any significance + for Firezone. + ''; + }; + + auth = mkOption { + type = types.attrsOf ( + types.submodule { + freeformType = jsonFormat.type; + options = { + name = mkOption { + type = types.str; + description = "The name of this authentication provider"; + }; + + adapter = mkOption { + type = types.enum availableAuthAdapters; + description = "The auth adapter type"; + }; + + adapter_config.clientSecretFile = mkOption { + type = types.nullOr types.path; + default = null; + description = '' + A file containing the client secret for an openid_connect adapter. + You only need to set this if this is an openid_connect provider. + ''; + }; + }; + } + ); + default = { }; + example = { + myoidcprovider = { + adapter = "openid_connect"; + adapter_config = { + client_id = "clientid"; + clientSecretFile = "/run/secrets/oidc-client-secret"; + response_type = "code"; + scope = "openid email name"; + discovery_document_uri = "https://auth.example.com/.well-known/openid-configuration"; + }; + }; + }; + description = '' + All authentication providers to provision. The attribute name + will only be used to track the provider and does not have any + significance for Firezone.
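+
+            For `openid_connect` providers, prefer `adapter_config.clientSecretFile`
+            over an inline `client_secret` so the secret does not end up
+            world-readable in the Nix store; the generated provisioning state then
+            only contains an `{env:AUTH_CLIENT_SECRET_*}` reference that is resolved
+            at runtime.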
+ ''; + }; + + resources = mkOption { + type = types.attrsOf ( + types.submodule { + options = { + type = mkOption { + type = types.enum [ + "dns" + "cidr" + "ip" + ]; + description = "The resource type"; + }; + + name = mkOption { + type = types.str; + description = "The name of this resource"; + }; + + address = mkOption { + type = types.str; + description = "The address of this resource. Depending on the resource type, this should be an IP address, an IP address with CIDR mask, or a domain name."; + }; + + addressDescription = mkOption { + type = types.nullOr types.str; + default = null; + description = "An optional description of the resource address, usually a full link to the resource including a scheme."; + }; + + gatewayGroups = mkOption { + type = types.nonEmptyListOf types.str; + description = "A list of gateway groups (sites) which can reach the resource and may be used to connect to it."; + }; + + filters = mkOption { + type = types.listOf ( + types.submodule { + options = { + protocol = mkOption { + type = types.enum [ + "icmp" + "tcp" + "udp" + ]; + description = "The protocol to allow"; + }; + + ports = mkOption { + type = types.listOf typePortRange; + example = [ + 443 + { + from = 8080; + to = 8100; + } + ]; + default = [ ]; + apply = + xs: map (x: if x.from == x.to then toString x.from else "${toString x.from} - ${toString x.to}") xs; + description = "A list of single ports or port ranges to allow. Both bounds of a range are inclusive."; + }; + }; + } + ); + default = [ ]; + description = "A list of filters to restrict traffic. If no filters are given, all traffic is allowed."; + }; + }; + } + ); + default = { }; + example = { + vaultwarden = { + type = "dns"; + name = "Vaultwarden"; + address = "vault.example.com"; + addressDescription = "https://vault.example.com"; + gatewayGroups = [ "my-site" ]; + filters = [ + { protocol = "icmp"; } + { + protocol = "tcp"; + ports = [ + 80 + 443 + ]; + } + ]; + }; + }; + description = '' + All resources to provision. The attribute name will only be used to + track the resource and does not have any significance for Firezone. + ''; + }; + + policies = mkOption { + type = types.attrsOf ( + types.submodule { + options = { + description = mkOption { + type = types.nullOr types.str; + description = "The description of this policy"; + }; + + group = mkOption { + type = types.str; + description = "The group which should be allowed access to the given resource."; + }; + + resource = mkOption { + type = types.str; + description = "The resource to which access should be allowed."; + }; + }; + } + ); + default = { }; + example = { + access_vaultwarden = { + description = "Allow anyone to access vaultwarden"; + group = "everyone"; + resource = "vaultwarden"; + }; + }; + description = '' + All policies to provision. The attribute name will only be used to + track the policy and does not have any significance for Firezone. + ''; + }; + + groups = mkOption { + type = types.attrsOf ( + types.submodule { + options = { + name = mkOption { + type = types.str; + description = "The name of this group"; + }; + + members = mkOption { + type = types.listOf types.str; + default = [ ]; + description = "The members of this group"; + }; + + forceMembers = mkOption { + type = types.bool; + default = false; + description = "Ensure that only the given members are part of this group at every server start."; + }; + }; + } + ); + default = { }; + example = { + users = { + name = "Users"; + }; + }; + description = '' + All groups to provision.
The attribute name will only be used + to track the group and does not have any significance for + Firezone. + + A group named `everyone` will automatically be managed by Firezone. + ''; + }; + + relayGroups = mkOption { + type = types.attrsOf ( + types.submodule { + options = { + name = mkOption { + type = types.str; + description = "The name of this relay group"; + }; + }; + } + ); + default = { }; + example = { + my-relays = { + name = "My Relays"; + }; + }; + description = '' + All relay groups to provision. The attribute name + will only be used to track the relay group and does not have any + significance for Firezone. + ''; + }; + + gatewayGroups = mkOption { + type = types.attrsOf ( + types.submodule { + options = { + name = mkOption { + type = types.str; + description = "The name of this gateway group"; + }; + }; + } + ); + default = { }; + example = { + my-gateways = { + name = "My Gateways"; + }; + }; + description = '' + All gateway groups (sites) to provision. The attribute name + will only be used to track the gateway group and does not have any + significance for Firezone. + ''; + }; + }; + } + ); + default = { }; + example = { + main = { + name = "My Account / Organization"; + metadata.stripe.billing_email = "org@myorg.example.com"; + features.rest_api = false; + }; + }; + description = '' + All accounts to provision. The attribute name specified here will + become the account slug. By using `"{file:/path/to/file}"` as a + string value anywhere in these settings, the provisioning script will + replace that value with the content of the given file at runtime. + + Please refer to the [Firezone source code](https://github.com/firezone/firezone/blob/main/elixir/apps/domain/lib/domain/accounts/account.ex) + for all available properties. + ''; + }; + }; + }; + + config = mkMerge [ + { + assertions = + [ + { + assertion = cfg.provision.enable -> cfg.domain.enable; + message = "Provisioning must be done on a machine running the firezone domain server"; + } + ] + ++ concatLists ( + flip mapAttrsToList cfg.provision.accounts ( + accountName: accountCfg: + [ + { + assertion = (builtins.match "^[[:lower:]_-]+$" accountName) != null; + message = "An account name must contain only lowercase characters and underscores, as it will be used as the URL slug for this account."; + } + ] + ++ flip mapAttrsToList accountCfg.auth ( + authName: _: { + assertion = (builtins.match "^[[:alnum:]_-]+$" authName) != null; + message = "The authentication provider attribute key must contain only letters, numbers, underscores or dashes."; + } + ) + ) + ); + } + # Enable all components if the main server is enabled + (mkIf cfg.enable { + services.firezone.server.domain.enable = true; + services.firezone.server.web.enable = true; + services.firezone.server.api.enable = true; + }) + # Create (and configure) a local database if desired + (mkIf cfg.enableLocalDB { + services.postgresql = { + enable = true; + ensureUsers = [ + { + name = "firezone"; + ensureDBOwnership = true; + } + ]; + ensureDatabases = [ "firezone" ]; + }; + + services.firezone.server.settings = { + DATABASE_SOCKET_DIR = "/run/postgresql"; + DATABASE_PORT = "5432"; + DATABASE_NAME = "firezone"; + DATABASE_USER = "firezone"; + DATABASE_PASSWORD = "firezone"; + }; + }) + # Create a local nginx reverse proxy + (mkIf cfg.nginx.enable { + services.nginx = mkMerge [ + { + enable = true; + } + ( + let + urlComponents = builtins.elemAt (builtins.split "https://([^/]*)(/?.*)" cfg.web.externalUrl) 1; + domain = builtins.elemAt urlComponents 0; + 
location = builtins.elemAt urlComponents 1; + in + { + virtualHosts.${domain} = { + forceSSL = mkDefault true; + locations.${location} = { + # The trailing slash is important to strip the location prefix from the request + proxyPass = "http://${cfg.web.address}:${toString cfg.web.port}/"; + proxyWebsockets = true; + }; + }; + } + ) + ( + let + urlComponents = builtins.elemAt (builtins.split "https://([^/]*)(/?.*)" cfg.api.externalUrl) 1; + domain = builtins.elemAt urlComponents 0; + location = builtins.elemAt urlComponents 1; + in + { + virtualHosts.${domain} = { + forceSSL = mkDefault true; + locations.${location} = { + # The trailing slash is important to strip the location prefix from the request + proxyPass = "http://${cfg.api.address}:${toString cfg.api.port}/"; + proxyWebsockets = true; + }; + }; + } + ) + ]; + }) + # Specify sensible defaults + { + services.firezone.server = { + settings = { + LOG_LEVEL = mkDefault "info"; + RELEASE_HOSTNAME = mkDefault "localhost.localdomain"; + + ERLANG_CLUSTER_ADAPTER = mkDefault "Elixir.Cluster.Strategy.Epmd"; + ERLANG_CLUSTER_ADAPTER_CONFIG = mkDefault ( + builtins.toJSON { + hosts = cfg.clusterHosts; + } + ); + + TZDATA_DIR = mkDefault "/var/lib/firezone/tzdata"; + TELEMETRY_ENABLED = mkDefault false; + + # By default this will open nproc * 2 connections for each component, + # which can exceed the (default) maximum of 100 connections for + # PostgreSQL on a 12-core machine with SMT. 16 connections will be + # sufficient for small to medium deployments. + DATABASE_POOL_SIZE = mkDefault "16"; + + AUTH_PROVIDER_ADAPTERS = mkDefault (concatStringsSep "," availableAuthAdapters); + + FEATURE_FLOW_ACTIVITIES_ENABLED = mkDefault true; + FEATURE_POLICY_CONDITIONS_ENABLED = mkDefault true; + FEATURE_MULTI_SITE_RESOURCES_ENABLED = mkDefault true; + FEATURE_SELF_HOSTED_RELAYS_ENABLED = mkDefault true; + FEATURE_IDP_SYNC_ENABLED = mkDefault true; + FEATURE_REST_API_ENABLED = mkDefault true; + FEATURE_INTERNET_RESOURCE_ENABLED = mkDefault true; + FEATURE_TRAFFIC_FILTERS_ENABLED = mkDefault true; + + FEATURE_SIGN_UP_ENABLED = mkDefault (!cfg.provision.enable); + }; + + domain.settings = { + ERLANG_DISTRIBUTION_PORT = mkDefault 9000; + HEALTHZ_PORT = mkDefault 4000; + BACKGROUND_JOBS_ENABLED = mkDefault true; + }; + + web.settings = { + ERLANG_DISTRIBUTION_PORT = mkDefault 9001; + HEALTHZ_PORT = mkDefault 4001; + BACKGROUND_JOBS_ENABLED = mkDefault false; + + PHOENIX_LISTEN_ADDRESS = mkDefault cfg.web.address; + PHOENIX_EXTERNAL_TRUSTED_PROXIES = mkDefault (builtins.toJSON cfg.web.trustedProxies); + PHOENIX_HTTP_WEB_PORT = mkDefault cfg.web.port; + PHOENIX_HTTP_API_PORT = mkDefault cfg.api.port; + PHOENIX_SECURE_COOKIES = mkDefault true; # enforce HTTPS on cookies + WEB_EXTERNAL_URL = mkDefault cfg.web.externalUrl; + API_EXTERNAL_URL = mkDefault cfg.api.externalUrl; + }; + + api.settings = { + ERLANG_DISTRIBUTION_PORT = mkDefault 9002; + HEALTHZ_PORT = mkDefault 4002; + BACKGROUND_JOBS_ENABLED = mkDefault false; + + PHOENIX_LISTEN_ADDRESS = mkDefault cfg.api.address; + PHOENIX_EXTERNAL_TRUSTED_PROXIES = mkDefault (builtins.toJSON cfg.api.trustedProxies); + PHOENIX_HTTP_WEB_PORT = mkDefault cfg.web.port; + PHOENIX_HTTP_API_PORT = mkDefault cfg.api.port; + PHOENIX_SECURE_COOKIES = mkDefault true; # enforce HTTPS on cookies + WEB_EXTERNAL_URL = mkDefault cfg.web.externalUrl; + API_EXTERNAL_URL = mkDefault cfg.api.externalUrl; + }; + }; + } + (mkIf (!cfg.smtp.configureManually) { + services.firezone.server.settings = { + OUTBOUND_EMAIL_ADAPTER = "Elixir.Swoosh.Adapters.Mua";
+ OUTBOUND_EMAIL_ADAPTER_OPTS = builtins.toJSON { }; + OUTBOUND_EMAIL_FROM = cfg.smtp.from; + OUTBOUND_EMAIL_SMTP_HOST = cfg.smtp.host; + OUTBOUND_EMAIL_SMTP_PORT = toString cfg.smtp.port; + OUTBOUND_EMAIL_SMTP_PROTOCOL = if cfg.smtp.implicitTls then "ssl" else "tcp"; + OUTBOUND_EMAIL_SMTP_USERNAME = cfg.smtp.username; + }; + services.firezone.server.settingsSecret = { + OUTBOUND_EMAIL_SMTP_PASSWORD = cfg.smtp.passwordFile; + }; + }) + (mkIf cfg.provision.enable { + # Load client secrets from authentication providers + services.firezone.server.settingsSecret = flip concatMapAttrs cfg.provision.accounts ( + accountName: accountCfg: + flip concatMapAttrs accountCfg.auth ( + authName: authCfg: + optionalAttrs (authCfg.adapter_config.clientSecretFile != null) { + "AUTH_CLIENT_SECRET_${toUpper accountName}_${toUpper authName}" = + authCfg.adapter_config.clientSecretFile; + } + ) + ); + }) + (mkIf (cfg.openClusterFirewall && cfg.domain.enable) { + networking.firewall.allowedTCPPorts = [ + cfg.domain.settings.ERLANG_DISTRIBUTION_PORT + ]; + }) + (mkIf (cfg.openClusterFirewall && cfg.web.enable) { + networking.firewall.allowedTCPPorts = [ + cfg.web.settings.ERLANG_DISTRIBUTION_PORT + ]; + }) + (mkIf (cfg.openClusterFirewall && cfg.api.enable) { + networking.firewall.allowedTCPPorts = [ + cfg.api.settings.ERLANG_DISTRIBUTION_PORT + ]; + }) + (mkIf (cfg.domain.enable || cfg.web.enable || cfg.api.enable) { + systemd.slices.system-firezone = { + description = "Firezone Slice"; + }; + + systemd.targets.firezone = { + description = "Common target for all Firezone services."; + wantedBy = [ "multi-user.target" ]; + }; + + systemd.services.firezone-initialize = { + description = "Backend initialization service for the Firezone zero-trust access platform"; + + after = mkIf cfg.enableLocalDB [ "postgresql.service" ]; + requires = mkIf cfg.enableLocalDB [ "postgresql.service" ]; + wantedBy = [ "firezone.target" ]; + partOf = [ "firezone.target" ]; + + script = '' + mkdir -p "$TZDATA_DIR" + + # Generate and load secrets + ${generateSecrets} + ${loadSecretEnvironment "domain"} + + echo "Running migrations" + ${getExe cfg.domain.package} eval Domain.Release.migrate + ''; + + # We use the domain environment to be able to run migrations + environment = collectEnvironment "domain"; + serviceConfig = commonServiceConfig // { + Type = "oneshot"; + RemainAfterExit = true; + }; + }; + + systemd.services.firezone-server-domain = mkIf cfg.domain.enable { + description = "Backend domain server for the Firezone zero-trust access platform"; + after = [ "firezone-initialize.service" ]; + bindsTo = [ "firezone-initialize.service" ]; + wantedBy = [ "firezone.target" ]; + partOf = [ "firezone.target" ]; + + script = '' + ${loadSecretEnvironment "domain"} + exec ${getExe cfg.domain.package} start; + ''; + + path = [ pkgs.curl ]; + postStart = + '' + # Wait for the firezone server to come online + count=0 + while [[ "$(curl -s "http://localhost:${toString cfg.domain.settings.HEALTHZ_PORT}" 2>/dev/null || echo)" != '{"status":"ok"}' ]] + do + sleep 1 + if [[ "$count" -eq 30 ]]; then + echo "Tried for at least 30 seconds, giving up..." + exit 1 + fi + count=$((count + 1)) + done + '' + + optionalString cfg.provision.enable '' + # Wait for the server to fully come up. Not ideal to use sleep, but at least it works.
+ sleep 1 + + ${loadSecretEnvironment "domain"} + ln -sTf ${provisionStateJson} provision-state.json + ${getExe cfg.domain.package} rpc 'Code.eval_file("${./provision.exs}")' + ''; + + environment = collectEnvironment "domain"; + serviceConfig = commonServiceConfig; + }; + + systemd.services.firezone-server-web = mkIf cfg.web.enable { + description = "Backend web server for the Firezone zero-trust access platform"; + after = [ "firezone-initialize.service" ]; + bindsTo = [ "firezone-initialize.service" ]; + wantedBy = [ "firezone.target" ]; + partOf = [ "firezone.target" ]; + + script = '' + ${loadSecretEnvironment "web"} + exec ${getExe cfg.web.package} start; + ''; + + environment = collectEnvironment "web"; + serviceConfig = commonServiceConfig; + }; + + systemd.services.firezone-server-api = mkIf cfg.api.enable { + description = "Backend api server for the Firezone zero-trust access platform"; + after = [ "firezone-initialize.service" ]; + bindsTo = [ "firezone-initialize.service" ]; + wantedBy = [ "firezone.target" ]; + partOf = [ "firezone.target" ]; + + script = '' + ${loadSecretEnvironment "api"} + exec ${getExe cfg.api.package} start; + ''; + + environment = collectEnvironment "api"; + serviceConfig = commonServiceConfig; + }; + }) + ]; + + meta.maintainers = with lib.maintainers; [ + oddlama + patrickdag + ]; +} diff --git a/fz/pkgs/firezone-gateway/package.nix b/fz/pkgs/firezone-gateway/package.nix new file mode 100644 index 0000000..0e6010b --- /dev/null +++ b/fz/pkgs/firezone-gateway/package.nix @@ -0,0 +1,45 @@ +{ + lib, + rustPlatform, + fetchFromGitHub, + nix-update-script, +}: +rustPlatform.buildRustPackage rec { + pname = "firezone-gateway"; + version = "1.4.5"; + src = fetchFromGitHub { + owner = "firezone"; + repo = "firezone"; + tag = "gateway-${version}"; + hash = "sha256-2MDQyMCQIqV1Kbem53jnE8DGUZ6SrZqp2LpGJXvLBgA="; + }; + + useFetchCargoVendor = true; + cargoHash = "sha256-Yz9xuH8Eph1pzv0siTpvdSXZLj/AjS5PR06CitK9NdE="; + sourceRoot = "${src.name}/rust"; + buildAndTestSubdir = "gateway"; + RUSTFLAGS = "--cfg system_certs"; + + # Required to remove profiling arguments which conflict with this builder + postPatch = '' + rm .cargo/config.toml + ''; + + passthru.updateScript = nix-update-script { + extraArgs = [ + "--version-regex" + "gateway-(.*)" + ]; + }; + + meta = { + description = "WireGuard tunnel server for the Firezone zero-trust access platform"; + homepage = "https://github.com/firezone/firezone"; + license = lib.licenses.asl20; + maintainers = with lib.maintainers; [ + oddlama + patrickdag + ]; + mainProgram = "firezone-gateway"; + }; +} diff --git a/fz/pkgs/firezone-gui-client/package.nix b/fz/pkgs/firezone-gui-client/package.nix new file mode 100644 index 0000000..c708a41 --- /dev/null +++ b/fz/pkgs/firezone-gui-client/package.nix @@ -0,0 +1,166 @@ +{ + lib, + rustPlatform, + fetchFromGitHub, + nix-update-script, + stdenvNoCC, + pkg-config, + openssl, + dbus, + zenity, + cargo-tauri, + gdk-pixbuf, + glib, + gobject-introspection, + gtk3, + kdePackages, + libsoup_3, + libayatana-appindicator, + webkitgtk_4_1, + wrapGAppsHook3, + pnpm_9, + nodejs, + makeDesktopItem, + copyDesktopItems, +}: +let + version = "1.4.8"; + src = fetchFromGitHub { + owner = "firezone"; + repo = "firezone"; + tag = "gui-client-${version}"; + hash = "sha256-YaT/AdnBLDmoda8CGHG/Nc6RFAW8zqr4fOnTdeARlUA="; + }; + + frontend = stdenvNoCC.mkDerivation rec { + pname = "firezone-gui-client-frontend"; + inherit version src; + + pnpmDeps = pnpm_9.fetchDeps { + inherit pname version; + src = 
"${src}/rust/gui-client"; + hash = "sha256-9ywC920EF6UxkXHs+0WWaU8fr5J35/C+0nNGbSVHESE="; + }; + pnpmRoot = "rust/gui-client"; + + nativeBuildInputs = [ + pnpm_9.configHook + nodejs + ]; + + buildPhase = '' + runHook preBuild + + cd $pnpmRoot + cp node_modules/flowbite/dist/flowbite.min.js src/ + pnpm tailwindcss -i src/input.css -o src/output.css + node --max_old_space_size=1024000 ./node_modules/vite/bin/vite.js build + + runHook postBuild + ''; + + installPhase = '' + runHook preInstall + + cp -r dist $out + + runHook postInstall + ''; + }; +in +rustPlatform.buildRustPackage rec { + pname = "firezone-gui-client"; + inherit version src; + + useFetchCargoVendor = true; + cargoHash = "sha256-Yz9xuH8Eph1pzv0siTpvdSXZLj/AjS5PR06CitK9NdE="; + sourceRoot = "${src.name}/rust"; + buildAndTestSubdir = "gui-client"; + RUSTFLAGS = "--cfg system_certs"; + + nativeBuildInputs = [ + cargo-tauri.hook + pkg-config + wrapGAppsHook3 + copyDesktopItems + ]; + + buildInputs = [ + openssl + dbus + gdk-pixbuf + glib + gobject-introspection + gtk3 + libsoup_3 + + libayatana-appindicator + webkitgtk_4_1 + ]; + + # Required to remove profiling arguments which conflict with this builder + postPatch = '' + rm .cargo/config.toml + ln -s ${frontend} gui-client/dist + ''; + + # Tries to compile apple specific crates due to workspace dependencies, + # not sure if this can be worked around + doCheck = false; + + desktopItems = [ + # Additional desktop item to associate deep-links + (makeDesktopItem { + name = "firezone-client-gui-deep-link"; + exec = "firezone-client-gui open-deep-link %U"; + icon = "firezone-client-gui"; + comment = meta.description; + desktopName = "Firezone GUI Client"; + categories = [ "Network" ]; + noDisplay = true; + mimeTypes = [ + "x-scheme-handler/firezone-fd0020211111" + ]; + }) + ]; + + preFixup = '' + gappsWrapperArgs+=( + # Otherwise blank screen, see https://github.com/tauri-apps/tauri/issues/9304 + --set WEBKIT_DISABLE_DMABUF_RENDERER 1 + --prefix PATH ":" ${ + lib.makeBinPath [ + zenity + kdePackages.kdialog + ] + } + --prefix LD_LIBRARY_PATH ":" ${ + lib.makeLibraryPath [ + libayatana-appindicator + ] + } + ) + ''; + + passthru = { + inherit frontend; + + updateScript = nix-update-script { + extraArgs = [ + "--version-regex" + "gui-client-(.*)" + ]; + }; + }; + + meta = { + description = "GUI client for the Firezone zero-trust access platform"; + homepage = "https://github.com/firezone/firezone"; + license = lib.licenses.asl20; + maintainers = with lib.maintainers; [ + oddlama + patrickdag + ]; + mainProgram = "firezone-gui-client"; + }; +} diff --git a/fz/pkgs/firezone-headless-client/package.nix b/fz/pkgs/firezone-headless-client/package.nix new file mode 100644 index 0000000..fa8ad4e --- /dev/null +++ b/fz/pkgs/firezone-headless-client/package.nix @@ -0,0 +1,50 @@ +{ + lib, + rustPlatform, + fetchFromGitHub, + nix-update-script, +}: +rustPlatform.buildRustPackage rec { + pname = "firezone-headless-client"; + version = "1.4.4"; + src = fetchFromGitHub { + owner = "firezone"; + repo = "firezone"; + tag = "headless-client-${version}"; + hash = "sha256-2MDQyMCQIqV1Kbem53jnE8DGUZ6SrZqp2LpGJXvLBgA="; + }; + + useFetchCargoVendor = true; + cargoHash = "sha256-Yz9xuH8Eph1pzv0siTpvdSXZLj/AjS5PR06CitK9NdE="; + sourceRoot = "${src.name}/rust"; + buildAndTestSubdir = "headless-client"; + RUSTFLAGS = "--cfg system_certs"; + + # Required to remove profiling arguments which conflict with this builder + postPatch = '' + rm .cargo/config.toml + ''; + + # Required to run tests + preCheck = '' + 
export XDG_RUNTIME_DIR=$(mktemp -d) + ''; + + passthru.updateScript = nix-update-script { + extraArgs = [ + "--version-regex" + "headless-client-(.*)" + ]; + }; + + meta = { + description = "CLI client for the Firezone zero-trust access platform"; + homepage = "https://github.com/firezone/firezone"; + license = lib.licenses.asl20; + maintainers = with lib.maintainers; [ + oddlama + patrickdag + ]; + mainProgram = "firezone-headless-client"; + }; +} diff --git a/fz/pkgs/firezone-relay/package.nix b/fz/pkgs/firezone-relay/package.nix new file mode 100644 index 0000000..45dac6e --- /dev/null +++ b/fz/pkgs/firezone-relay/package.nix @@ -0,0 +1,37 @@ +{ + lib, + rustPlatform, + fetchFromGitHub, +}: +rustPlatform.buildRustPackage rec { + pname = "firezone-relay"; + version = "0-unstable-2025-03-15"; + src = fetchFromGitHub { + owner = "firezone"; + repo = "firezone"; + rev = "09fb5f927410503b0d6e7fc6cf6a2ba06cb5a281"; + hash = "sha256-qDeXAzOeTenL6OIsun/rEfPMo62mQT7RhJEmqemzMsM="; + }; + + useFetchCargoVendor = true; + cargoHash = "sha256-uqy4GgYaSX2kM4a37093lHmhvOtNUhkEs6/ZS1bjuYo="; + sourceRoot = "${src.name}/rust"; + buildAndTestSubdir = "relay"; + RUSTFLAGS = "--cfg system_certs"; + + # Required to remove profiling arguments which conflict with this builder + postPatch = '' + rm .cargo/config.toml + ''; + + meta = { + description = "STUN/TURN server for the Firezone zero-trust access platform"; + homepage = "https://github.com/firezone/firezone"; + license = lib.licenses.asl20; + maintainers = with lib.maintainers; [ + oddlama + patrickdag + ]; + mainProgram = "firezone-relay"; + }; +} diff --git a/fz/pkgs/firezone-server/0000-add-mua.patch b/fz/pkgs/firezone-server/0000-add-mua.patch new file mode 100644 index 0000000..68d8a9b --- /dev/null +++ b/fz/pkgs/firezone-server/0000-add-mua.patch @@ -0,0 +1,67 @@ +diff --git a/elixir/apps/domain/lib/domain/config/definitions.ex b/elixir/apps/domain/lib/domain/config/definitions.ex +index 8cd2e8d0f..92e18b10b 100644 +--- a/elixir/apps/domain/lib/domain/config/definitions.ex ++++ b/elixir/apps/domain/lib/domain/config/definitions.ex +@@ -584,6 +590,7 @@ defmodule Domain.Config.Definitions do + Swoosh.Adapters.Mailgun, + Swoosh.Adapters.Mailjet, + Swoosh.Adapters.Mandrill, ++ Swoosh.Adapters.Mua, + Swoosh.Adapters.Postmark, + Swoosh.Adapters.ProtonBridge, + Swoosh.Adapters.SMTP, +diff --git a/elixir/config/runtime.exs b/elixir/config/runtime.exs +index 15037e0a3..475c4ddfb 100644 +--- a/elixir/config/runtime.exs ++++ b/elixir/config/runtime.exs +@@ -226,8 +228,15 @@ if config_env() == :prod do + config :domain, + Domain.Mailer, + [ +- adapter: compile_config!(:outbound_email_adapter), +- from_email: compile_config!(:outbound_email_from) ++ adapter: compile_config!(:outbound_email_adapter), ++ from_email: compile_config!(:outbound_email_from), ++ protocol: String.to_atom(System.get_env("OUTBOUND_EMAIL_SMTP_PROTOCOL")), ++ relay: System.get_env("OUTBOUND_EMAIL_SMTP_HOST"), ++ port: String.to_integer(System.get_env("OUTBOUND_EMAIL_SMTP_PORT")), ++ auth: [ ++ username: System.get_env("OUTBOUND_EMAIL_SMTP_USERNAME"), ++ password: System.get_env("OUTBOUND_EMAIL_SMTP_PASSWORD") ++ ] + ] ++ compile_config!(:outbound_email_adapter_opts) + + config :workos, WorkOS.Client, +diff --git a/elixir/mix.exs b/elixir/mix.exs +index 12782d631..dee1245d2 100644 +--- a/elixir/mix.exs ++++ b/elixir/mix.exs +@@ -47,7 +47,9 @@ defmodule Firezone.MixProject do + # Formatter doesn't track dependencies of children applications + {:phoenix, "~> 1.7.0"}, + {:phoenix_live_view, 
"~> 1.0.0-rc.0"}, +- {:floki, "~> 0.37.0"} ++ {:floki, "~> 0.37.0"}, ++ {:mua, "~> 0.2.0"}, ++ {:mail, "~> 0.3.0"} + ] + end + +diff --git a/elixir/mix.lock b/elixir/mix.lock +index 8c4b65959..3d2f9faca 100644 +--- a/elixir/mix.lock ++++ b/elixir/mix.lock +@@ -50,11 +50,13 @@ + "junit_formatter": {:hex, :junit_formatter, "3.4.0", "d0e8db6c34dab6d3c4154c3b46b21540db1109ae709d6cf99ba7e7a2ce4b1ac2", [:mix], [], "hexpm", "bb36e2ae83f1ced6ab931c4ce51dd3dbef1ef61bb4932412e173b0cfa259dacd"}, + "libcluster": {:hex, :libcluster, "3.3.3", "a4f17721a19004cfc4467268e17cff8b1f951befe428975dd4f6f7b84d927fe0", [:mix], [{:jason, "~> 1.1", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "7c0a2275a0bb83c07acd17dab3c3bfb4897b145106750eeccc62d302e3bdfee5"}, + "logger_json": {:hex, :logger_json, "6.2.0", "13e2e9f5f7b195865c5c3ef3d296c3ad50e7ecb038d899433702a79e979b91d7", [:mix], [{:ecto, "~> 3.11", [hex: :ecto, repo: "hexpm", optional: true]}, {:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:plug, "~> 1.15", [hex: :plug, repo: "hexpm", optional: true]}, {:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: true]}], "hexpm", "98366d02bedbb56e41b25a6d248d566d4f4bc224bae2b1e982df00ed04ba9219"}, ++ "mail": {:hex, :mail, "0.3.1", "cb0a14e4ed8904e4e5a08214e686ccf6f9099346885db17d8c309381f865cc5c", [:mix], [], "hexpm", "1db701e89865c1d5fa296b2b57b1cd587587cca8d8a1a22892b35ef5a8e352a6"}, + "metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm", "69b09adddc4f74a40716ae54d140f93beb0fb8978d8636eaded0c31b6f099f16"}, + "mime": {:hex, :mime, "2.0.6", "8f18486773d9b15f95f4f4f1e39b710045fa1de891fada4516559967276e4dc2", [:mix], [], "hexpm", "c9945363a6b26d747389aac3643f8e0e09d30499a138ad64fe8fd1d13d9b153e"}, + "mimerl": {:hex, :mimerl, "1.3.0", "d0cd9fc04b9061f82490f6581e0128379830e78535e017f7780f37fea7545726", [:rebar3], [], "hexpm", "a1e15a50d1887217de95f0b9b0793e32853f7c258a5cd227650889b38839fe9d"}, + "mint": {:hex, :mint, "1.6.2", "af6d97a4051eee4f05b5500671d47c3a67dac7386045d87a904126fd4bbcea2e", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:hpax, "~> 0.1.1 or ~> 0.2.0 or ~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}], "hexpm", "5ee441dffc1892f1ae59127f74afe8fd82fda6587794278d924e4d90ea3d63f9"}, + "mix_audit": {:hex, :mix_audit, "2.1.4", "0a23d5b07350cdd69001c13882a4f5fb9f90fbd4cbf2ebc190a2ee0d187ea3e9", [:make, :mix], [{:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:yaml_elixir, "~> 2.11", [hex: :yaml_elixir, repo: "hexpm", optional: false]}], "hexpm", "fd807653cc8c1cada2911129c7eb9e985e3cc76ebf26f4dd628bb25bbcaa7099"}, ++ "mua": {:hex, :mua, "0.2.4", "a9172ab0a1ac8732cf2699d739ceac3febcb9b4ffc540260ad2e32c0b6632af9", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}], "hexpm", "e7e4dacd5ad65f13e3542772e74a159c00bd2d5579e729e9bb72d2c73a266fb7"}, + "multipart": {:hex, :multipart, "0.4.0", "634880a2148d4555d050963373d0e3bbb44a55b2badd87fa8623166172e9cda0", [:mix], [{:mime, "~> 1.2 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}], "hexpm", "3c5604bc2fb17b3137e5d2abdf5dacc2647e60c5cc6634b102cf1aef75a06f0a"}, + "nimble_csv": {:hex, :nimble_csv, "1.2.0", "4e26385d260c61eba9d4412c71cea34421f296d5353f914afe3f2e71cce97722", [:mix], [], "hexpm", "d0628117fcc2148178b034044c55359b26966c6eaa8e2ce15777be3bbc91b12a"}, + "nimble_options": {:hex, :nimble_options, "1.1.1", 
"e3a492d54d85fc3fd7c5baf411d9d2852922f66e69476317787a7b2bb000a61b", [:mix], [], "hexpm", "821b2470ca9442c4b6984882fe9bb0389371b8ddec4d45a9504f00a66f650b44"}, diff --git a/fz/pkgs/firezone-server/package.nix b/fz/pkgs/firezone-server/package.nix new file mode 100644 index 0000000..87ed27d --- /dev/null +++ b/fz/pkgs/firezone-server/package.nix @@ -0,0 +1,90 @@ +{ + lib, + nixosTests, + fetchFromGitHub, + beamPackages, + gitMinimal, + pnpm_9, + nodejs, + tailwindcss_3, + esbuild, + + mixReleaseName ? "domain", # "domain" "web" or "api" +}: + +beamPackages.mixRelease rec { + pname = "firezone-server-${mixReleaseName}"; + version = "0-unstable-2025-03-15"; + + src = "${ + fetchFromGitHub { + owner = "firezone"; + repo = "firezone"; + rev = "09fb5f927410503b0d6e7fc6cf6a2ba06cb5a281"; + hash = "sha256-1CZBFhOwX0DfXykPQ9tzn4tHg2tSnByXEPtlZleHK5k="; + + # This is necessary to allow sending mails via SMTP, as the default + # SMTP adapter is current broken: https://github.com/swoosh/swoosh/issues/785 + postFetch = '' + ${lib.getExe gitMinimal} -C $out apply ${./0000-add-mua.patch} + ''; + } + }/elixir"; + + pnpmDeps = pnpm_9.fetchDeps { + inherit pname version; + src = "${src}/apps/web/assets"; + hash = "sha256-ejyBppFtKeyVhAWmssglbpLleOnbw9d4B+iM5Vtx47A="; + }; + pnpmRoot = "apps/web/assets"; + + preBuild = '' + cat >> config/config.exs <> config/runtime.exs <