Mirror of https://github.com/oddlama/nix-config.git (synced 2025-10-11 07:10:39 +02:00)
feat: switch to influxdb2 module from provisioning PR
commit af066925b4 (parent e99253b9db)
6 changed files with 1069 additions and 999 deletions
@@ -1,4 +1,5 @@
 {
+  disabledModules = ["services/databases/influxdb2.nix"];
   imports = [
     ../users/root

@@ -18,7 +19,7 @@
     ./config/users.nix
     ./config/xdg.nix

-    ./meta/influxdb.nix
+    ./meta/influxdb2.nix
     ./meta/microvms.nix
     ./meta/nginx.nix
     ./meta/oauth2-proxy.nix
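The two hunks above carry the switch: disabledModules drops the influxdb2 module bundled with nixpkgs (the path is relative to nixpkgs' nixos/modules directory), and the import list now pulls in ./meta/influxdb2.nix instead of the old ./meta/influxdb.nix, so the replacement module can declare services.influxdb2 options without colliding with the upstream declarations. A minimal sketch of the pattern, with illustrative paths rather than the repository's exact layout:

{ ... }: {
  # Disable the module shipped with nixpkgs so its option declarations don't conflict...
  disabledModules = ["services/databases/influxdb2.nix"];
  # ...and import a local replacement that declares services.influxdb2.* itself.
  imports = [./meta/influxdb2.nix];
}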
@@ -1,6 +0,0 @@
module influx-token-manipulator

go 1.20

require go.etcd.io/bbolt v1.3.7
require golang.org/x/sys v0.4.0 // indirect
@@ -1,4 +0,0 @@
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1,122 +0,0 @@
package main

import (
	"encoding/json"
	"fmt"
	"go.etcd.io/bbolt"
	"io/ioutil"
	"os"
	"regexp"
	"strings"
)

var tokenPaths = map[string]string{
	// Add token secrets here or in separate file
}

func main() {
	if len(os.Args) != 2 {
		fmt.Println("Usage: ./influx-token-manipulator <influxd.bolt>\n")
		os.Exit(1)
	}

	dbPath := os.Args[1]

	db, err := bbolt.Open(dbPath, 0666, nil)
	if err != nil {
		fmt.Printf("Error opening database: %v\n", err)
	}
	defer db.Close()

	err = db.Update(func(tx *bbolt.Tx) error {
		authBucket := tx.Bucket([]byte("authorizationsv1"))
		if authBucket == nil {
			fmt.Println("Bucket 'authorizationsv1' not found.")
			os.Exit(1)
		}

		authIndex := tx.Bucket([]byte("authorizationindexv1"))
		if authIndex == nil {
			fmt.Println("Bucket 'authorizationindexv1' not found.")
			os.Exit(1)
		}

		return authBucket.ForEach(func(k, v []byte) error {
			var obj map[string]interface{}
			if err := json.Unmarshal(v, &obj); err != nil {
				fmt.Printf("Error unmarshalling JSON: %v\n", err)
				return nil // Continue processing other rows
			}

			description, ok := obj["description"].(string)
			if !ok {
				return nil // Skip if description is not present
			}

			identifierRegex := regexp.MustCompile(`[0-9a-f]{32}`)
			match := identifierRegex.FindString(description)
			if match == "" {
				return nil // Skip if description doesn't match regex
			}

			tokenPath, found := tokenPaths[match]
			if !found {
				return nil // Skip if match is not in lookup
			}
			delete(tokenPaths, match) // Remove entry from the map

			content, err := ioutil.ReadFile(tokenPath)
			if err != nil {
				fmt.Printf("Error reading new token file: %v\n", err)
				return nil // Continue processing other rows
			}
			newToken := strings.TrimSpace(string(content)) // Remove leading and trailing whitespace

			oldToken, ok := obj["token"].(string)
			if !ok {
				fmt.Printf("Skipping invalid token without .token\n")
				return nil // Skip if token is not present
			}

			if oldToken == newToken {
				return nil // Skip if token is already up-to-date
			}

			obj["token"] = newToken
			updatedValue, err := json.Marshal(obj)
			if err != nil {
				fmt.Printf("Error marshalling updated JSON: %v\n", err)
				return nil // Continue processing other rows
			}

			if err := authIndex.Delete([]byte(oldToken)); err != nil {
				fmt.Printf("Error deleting old token index in authorizationindexv1: %v\n", err)
				return nil // Continue processing other rows
			}

			if err := authIndex.Put([]byte(newToken), k); err != nil {
				fmt.Printf("Error adding new token index in authorizationindexv1: %v\n", err)
				return nil // Continue processing other rows
			}

			if err := authBucket.Put(k, updatedValue); err != nil {
				fmt.Printf("Error updating token in authorizationsv1: %v\n", err)
				return nil // Continue processing other rows
			}

			fmt.Printf("Updated token: '%s'\n", description)
			return nil
		})
	})
	if err != nil {
		fmt.Printf("Error during transaction: %v", err)
	}

	// Check if any tokens were not processed
	if len(tokenPaths) > 0 {
		fmt.Println("Warning: The following tokens were not encountered:")
		for token := range tokenPaths {
			fmt.Printf("- %s\n", token)
		}
	}
}
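The tokenPaths map in the deleted main.go above is deliberately left empty in the source: the Nix module further down splices entries into it at build time (see its postPatch phase), mapping the 32-character hex id embedded in each provisioned token's description to the file that holds the desired token value. A sketch of what one generated entry looks like, with a made-up id and secret path:

# Hypothetical example of the token-paths file injected by postPatch for a single
# ensureApiTokens entry that has tokenFile set; each line becomes one Go map entry.
pkgs.writeText "token-paths" ''
  "0123456789abcdef0123456789abcdef": "/run/secrets/influxdb-telegraf-token",
''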
@@ -1,866 +0,0 @@
{
  config,
  lib,
  pkgs,
  ...
}: let
  inherit
    (lib)
    concatMap
    concatMapStrings
    count
    elem
    escapeShellArg
    escapeShellArgs
    filter
    flip
    genAttrs
    getExe
    hasAttr
    head
    mkBefore
    mkEnableOption
    mkIf
    mkOption
    optional
    optionalString
    optionals
    types
    unique
    ;

  cfg = config.services.influxdb2;

  tokenManipulator = pkgs.buildGoModule rec {
    pname = "influx-token-manipulator";
    version = "1.0.0";
    src = ./influx-token-manipulator;
    postPatch = ''
      sed -i '/Add token secrets here/ r ${
        pkgs.writeText "token-paths" (concatMapStrings
          (x: ''"${x.id}": "${x.tokenFile}",''\n'')
          (filter (x: x.tokenFile != null) cfg.provision.ensureApiTokens))
      }' main.go
    '';
    vendorHash = "sha256-zBZk7JbNILX18g9+2ukiESnFtnIVWhdN/J/MBhIITh8=";

    meta = with lib; {
      description = "Utility program to manipulate influxdb api tokens for declarative setups";
      mainProgram = "influx-token-manipulator";
      license = with licenses; [mit];
      maintainers = with maintainers; [oddlama];
    };
  };
in {
  options.services.influxdb2.provision = {
    enable = mkEnableOption "initial database setup";
    initialSetup = {
      organization = mkOption {
        type = types.str;
        example = "main";
        description = "Primary organization name";
      };
      bucket = mkOption {
        type = types.str;
        example = "example";
        description = "Primary bucket name";
      };
      username = mkOption {
        type = types.str;
        default = "admin";
        description = "Primary username";
      };
      retention = mkOption {
        type = types.str;
        default = "0";
        description = ''
          The duration for which the bucket will retain data (0 is infinite).
          Accepted units are `ns` (nanoseconds), `us` or `µs` (microseconds), `ms` (milliseconds),
          `s` (seconds), `m` (minutes), `h` (hours), `d` (days) and `w` (weeks).
        '';
      };
      passwordFile = mkOption {
        type = types.path;
        description = "Password for primary user. Don't use a file from the nix store!";
      };
      tokenFile = mkOption {
        type = types.nullOr types.path;
        default = null;
        description = "API Token for the admin user. If not given, influx will automatically generate one.";
      };
    };

    deleteOrganizations = mkOption {
      description = "List of organizations that should be deleted.";
      default = [];
      type = types.listOf types.str;
    };

    deleteBuckets = mkOption {
      description = "List of buckets that should be deleted.";
      default = [];
      type = types.listOf (types.submodule {
        options.name = mkOption {
          description = "Name of the bucket.";
          type = types.str;
        };

        options.org = mkOption {
          description = "The organization to which the bucket belongs.";
          type = types.str;
        };
      });
    };

    deleteUsers = mkOption {
      description = "List of users that should be deleted.";
      default = [];
      type = types.listOf types.str;
    };

    deleteRemotes = mkOption {
      description = "List of remotes that should be deleted.";
      default = [];
      type = types.listOf (types.submodule {
        options.name = mkOption {
          description = "Name of the remote.";
          type = types.str;
        };

        options.org = mkOption {
          description = "The organization to which the remote belongs.";
          type = types.str;
        };
      });
    };

    deleteReplications = mkOption {
      description = "List of replications that should be deleted.";
      default = [];
      type = types.listOf (types.submodule {
        options.name = mkOption {
          description = "Name of the replication.";
          type = types.str;
        };

        options.org = mkOption {
          description = "The organization to which the replication belongs.";
          type = types.str;
        };
      });
    };

    deleteApiTokens = mkOption {
      description = "List of api tokens that should be deleted.";
      default = [];
      type = types.listOf (types.submodule ({config, ...}: {
        options.id = mkOption {
          description = "A unique identifier for this token. See `ensureApiTokens.*.name` for more information.";
          readOnly = true;
          default = builtins.substring 0 32 (builtins.hashString "sha256" "${config.user}:${config.org}:${config.name}");
          type = types.str;
        };

        options.name = mkOption {
          description = "Name of the api token.";
          type = types.str;
        };

        options.org = mkOption {
          description = "The organization to which the api token belongs.";
          type = types.str;
        };

        options.user = mkOption {
          description = "The user to which the api token belongs.";
          type = types.str;
        };
      }));
    };

    ensureOrganizations = mkOption {
      description = "List of organizations that should be created. Future changes to the name will not be reflected.";
      default = [];
      type = types.listOf (types.submodule {
        options.name = mkOption {
          description = "Name of the organization.";
          type = types.str;
        };

        options.description = mkOption {
          description = "Optional description for the organization.";
          default = null;
          type = types.nullOr types.str;
        };
      });
    };

    ensureBuckets = mkOption {
      description = "List of buckets that should be created. Future changes to the name or org will not be reflected.";
      default = [];
      type = types.listOf (types.submodule {
        options.name = mkOption {
          description = "Name of the bucket.";
          type = types.str;
        };

        options.org = mkOption {
          description = "The organization the bucket belongs to.";
          type = types.str;
        };

        options.description = mkOption {
          description = "Optional description for the bucket.";
          default = null;
          type = types.nullOr types.str;
        };

        options.retention = mkOption {
          type = types.str;
          default = "0";
          description = ''
            The duration for which the bucket will retain data (0 is infinite).
            Accepted units are `ns` (nanoseconds), `us` or `µs` (microseconds), `ms` (milliseconds),
            `s` (seconds), `m` (minutes), `h` (hours), `d` (days) and `w` (weeks).
          '';
        };
      });
    };

    ensureUsers = mkOption {
      description = "List of users that should be created. Future changes to the name or primary org will not be reflected.";
      default = [];
      type = types.listOf (types.submodule {
        options.name = mkOption {
          description = "Name of the user.";
          type = types.str;
        };

        options.org = mkOption {
          description = "Primary organization to which the user will be added as a member.";
          type = types.str;
        };

        options.passwordFile = mkOption {
          description = "Password for the user. If unset, the user will not be able to log in until a password is set by an operator! Don't use a file from the nix store!";
          type = types.nullOr types.path;
        };
      });
    };

    ensureRemotes = mkOption {
      description = "List of remotes that should be created. Future changes to the name, org or remoteOrg will not be reflected.";
      default = [];
      type = types.listOf (types.submodule {
        options.name = mkOption {
          description = "Name of the remote.";
          type = types.str;
        };

        options.org = mkOption {
          description = "Organization to which the remote belongs.";
          type = types.str;
        };

        options.description = mkOption {
          description = "Optional description for the remote.";
          default = null;
          type = types.nullOr types.str;
        };

        options.remoteUrl = mkOption {
          description = "The url where the remote instance can be reached";
          type = types.str;
        };

        options.remoteOrg = mkOption {
          description = ''
            Corresponding remote organization. If this is used instead of `remoteOrgId`,
            the remote organization id must be queried first which means the provided remote
            token must have the `read-orgs` flag.
          '';
          type = types.nullOr types.str;
          default = null;
        };

        options.remoteOrgId = mkOption {
          description = "Corresponding remote organization id.";
          type = types.nullOr types.str;
          default = null;
        };

        options.remoteTokenFile = mkOption {
          type = types.path;
          description = "API token used to authenticate with the remote.";
        };
      });
    };
    ensureReplications = mkOption {
      description = "List of replications that should be created. Future changes to name, org or buckets will not be reflected.";
      default = [];
      type = types.listOf (types.submodule {
        options.name = mkOption {
          description = "Name of the replication.";
          type = types.str;
        };

        options.org = mkOption {
          description = "Organization to which the replication belongs.";
          type = types.str;
        };

        options.remote = mkOption {
          description = "The remote to replicate to.";
          type = types.str;
        };

        options.localBucket = mkOption {
          description = "The local bucket to replicate from.";
          type = types.str;
        };

        options.remoteBucket = mkOption {
          description = "The remote bucket to replicate to.";
          type = types.str;
        };
      });
    };
    ensureApiTokens = mkOption {
      description = "List of api tokens that should be created. Future changes to existing tokens cannot be reflected.";
      default = [];
      type = types.listOf (types.submodule ({config, ...}: {
        options.id = mkOption {
          description = "A unique identifier for this token. Since influx doesn't store names for tokens, this will be hashed and appended to the description to identify the token.";
          readOnly = true;
          default = builtins.substring 0 32 (builtins.hashString "sha256" "${config.user}:${config.org}:${config.name}");
          type = types.str;
        };

        options.name = mkOption {
          description = "A name to identify this token. Not an actual influxdb attribute, but needed to calculate a stable id (see `id`).";
          type = types.str;
        };

        options.user = mkOption {
          description = "The user to which the token belongs.";
          type = types.str;
        };

        options.org = mkOption {
          description = "Organization to which the token belongs.";
          type = types.str;
        };

        options.description = mkOption {
          description = ''
            Optional description for the api token.
            Note that the actual token will always be created with a description regardless
            of whether this is given or not. A unique suffix has to be appended to later identify
            the token to track whether it has already been created.
          '';
          default = null;
          type = types.nullOr types.str;
        };

        options.tokenFile = mkOption {
          type = types.nullOr types.path;
          default = null;
          description = "The token value. If not given, influx will automatically generate one.";
        };

        options.operator = mkOption {
          description = "Grants all permissions in all organizations.";
          default = false;
          type = types.bool;
        };

        options.allAccess = mkOption {
          description = "Grants all permissions in the associated organization.";
          default = false;
          type = types.bool;
        };

        options.readPermissions = mkOption {
          description = ''
            The read permissions to include for this token. Access is usually granted only
            for resources in the associated organization.

            Available permissions are `authorizations`, `buckets`, `dashboards`,
            `orgs`, `tasks`, `telegrafs`, `users`, `variables`, `secrets`, `labels`, `views`,
            `documents`, `notificationRules`, `notificationEndpoints`, `checks`, `dbrp`,
            `annotations`, `sources`, `scrapers`, `notebooks`, `remotes`, `replications`.

            Refer to `influx auth create --help` for a full list with descriptions.

            `buckets` grants read access to all associated buckets. Use `readBuckets` to define
            more granular access permissions.
          '';
          default = [];
          type = types.listOf types.str;
        };

        options.writePermissions = mkOption {
          description = ''
            The write permissions to include for this token. Access is usually granted only
            for resources in the associated organization.

            Available permissions are `authorizations`, `buckets`, `dashboards`,
            `orgs`, `tasks`, `telegrafs`, `users`, `variables`, `secrets`, `labels`, `views`,
            `documents`, `notificationRules`, `notificationEndpoints`, `checks`, `dbrp`,
            `annotations`, `sources`, `scrapers`, `notebooks`, `remotes`, `replications`.

            Refer to `influx auth create --help` for a full list with descriptions.

            `buckets` grants write access to all associated buckets. Use `writeBuckets` to define
            more granular access permissions.
          '';
          default = [];
          type = types.listOf types.str;
        };

        options.readBuckets = mkOption {
          description = "The organization's buckets which should be allowed to be read";
          default = [];
          type = types.listOf types.str;
        };

        options.writeBuckets = mkOption {
          description = "The organization's buckets which should be allowed to be written";
          default = [];
          type = types.listOf types.str;
        };
      }));
    };
  };
  config = mkIf (cfg.enable && cfg.provision.enable) {
    assertions = let
      validPermissions = flip genAttrs (x: true) [
        "authorizations"
        "buckets"
        "dashboards"
        "orgs"
        "tasks"
        "telegrafs"
        "users"
        "variables"
        "secrets"
        "labels"
        "views"
        "documents"
        "notificationRules"
        "notificationEndpoints"
        "checks"
        "dbrp"
        "annotations"
        "sources"
        "scrapers"
        "notebooks"
        "remotes"
        "replications"
      ];

      knownOrgs = map (x: x.name) cfg.provision.ensureOrganizations;
      knownRemotes = map (x: x.name) cfg.provision.ensureRemotes;
      knownBucketsFor = org: map (x: x.name) (filter (x: x.org == org) cfg.provision.ensureBuckets);
    in
      flip concatMap cfg.provision.ensureBuckets (bucket: [
        {
          assertion = elem bucket.org knownOrgs;
          message = "The influxdb bucket '${bucket.name}' refers to an unknown organization '${bucket.org}'.";
        }
      ])
      ++ flip concatMap cfg.provision.ensureUsers (user: [
        {
          assertion = elem user.org knownOrgs;
          message = "The influxdb user '${user.name}' refers to an unknown organization '${user.org}'.";
        }
      ])
      ++ flip concatMap cfg.provision.ensureRemotes (remote: [
        {
          assertion = (remote.remoteOrgId == null) != (remote.remoteOrg == null);
          message = "The influxdb remote '${remote.name}' must specify exactly one of remoteOrgId or remoteOrg.";
        }
        {
          assertion = elem remote.org knownOrgs;
          message = "The influxdb remote '${remote.name}' refers to an unknown organization '${remote.org}'.";
        }
      ])
      ++ flip concatMap cfg.provision.ensureReplications (replication: [
        {
          assertion = elem replication.remote knownRemotes;
          message = "The influxdb replication '${replication.name}' refers to an unknown remote '${replication.remote}'.";
        }
        (let
          remote = head (filter (x: x.name == replication.remote) cfg.provision.ensureRemotes);
        in {
          assertion = elem replication.localBucket (knownBucketsFor remote.org);
          message = "The influxdb replication '${replication.name}' refers to an unknown bucket '${replication.localBucket}' in organization '${remote.org}'.";
        })
      ])
      ++ flip concatMap cfg.provision.ensureApiTokens (apiToken: let
        validBuckets = flip genAttrs (x: true) (knownBucketsFor apiToken.org);
      in [
        {
          assertion = elem apiToken.org knownOrgs;
          message = "The influxdb apiToken '${apiToken.name}' refers to an unknown organization '${apiToken.org}'.";
        }
        {
          assertion =
            1
            == count (x: x) [
              apiToken.operator
              apiToken.allAccess
              (apiToken.readPermissions
                != []
                || apiToken.writePermissions != []
                || apiToken.readBuckets != []
                || apiToken.writeBuckets != [])
            ];
          message = "The influxdb apiToken '${apiToken.name}' in organization '${apiToken.org}' uses mutually exclusive options. The `operator` and `allAccess` options are mutually exclusive with each other and the granular permission settings.";
        }
        (let
          unknownBuckets = filter (x: !hasAttr x validBuckets) apiToken.readBuckets;
        in {
          assertion = unknownBuckets == [];
          message = "The influxdb apiToken '${apiToken.name}' refers to invalid buckets in readBuckets: ${toString unknownBuckets}";
        })
        (let
          unknownBuckets = filter (x: !hasAttr x validBuckets) apiToken.writeBuckets;
        in {
          assertion = unknownBuckets == [];
          message = "The influxdb apiToken '${apiToken.name}' refers to invalid buckets in writeBuckets: ${toString unknownBuckets}";
        })
        (let
          unknownPerms = filter (x: !hasAttr x validPermissions) apiToken.readPermissions;
        in {
          assertion = unknownPerms == [];
          message = "The influxdb apiToken '${apiToken.name}' refers to invalid read permissions: ${toString unknownPerms}";
        })
        (let
          unknownPerms = filter (x: !hasAttr x validPermissions) apiToken.writePermissions;
        in {
          assertion = unknownPerms == [];
          message = "The influxdb apiToken '${apiToken.name}' refers to invalid write permissions: ${toString unknownPerms}";
        })
      ]);
    systemd.services.influxdb2 = {
      # Mark if this is the first startup so postStart can do the initial setup
      preStart = ''
        if ! test -e "$STATE_DIRECTORY/influxd.bolt"; then
          touch "$STATE_DIRECTORY/.first_startup"
        else
          # Manipulate provisioned api tokens if necessary
          ${getExe tokenManipulator} "$STATE_DIRECTORY/influxd.bolt"
        fi
      '';

      postStart = let
        influxCli = getExe pkgs.influxdb2-cli;
      in
        ''
          set -euo pipefail
          export INFLUX_HOST="http://"${escapeShellArg config.services.influxdb2.settings.http-bind-address}

          # Wait for the influxdb server to come online
          count=0
          while ! ${influxCli} ping &>/dev/null; do
            if [ "$count" -eq 300 ]; then
              echo "Tried for 30 seconds, giving up..."
              exit 1
            fi

            if ! kill -0 "$MAINPID"; then
              echo "Main server died, giving up..."
              exit 1
            fi

            sleep 0.1
            count=$((count++))
          done

          if test -e "$STATE_DIRECTORY/.first_startup"; then
            # Do the initial database setup. Pass /dev/null as configs-path to
            # avoid saving the token as the active config.
            ${influxCli} setup \
              --configs-path /dev/null \
              --org ${escapeShellArg cfg.provision.initialSetup.organization} \
              --bucket ${escapeShellArg cfg.provision.initialSetup.bucket} \
              --username ${escapeShellArg cfg.provision.initialSetup.username} \
              --password "$(< ${escapeShellArg cfg.provision.initialSetup.passwordFile})" \
              --token "$(< ${escapeShellArg cfg.provision.initialSetup.tokenFile})" \
              --retention ${escapeShellArg cfg.provision.initialSetup.retention} \
              --force >/dev/null

            rm -f "$STATE_DIRECTORY/.first_startup"
          fi

          export INFLUX_TOKEN=$(< ${escapeShellArg cfg.provision.initialSetup.tokenFile})
          any_tokens_created=0
        ''
        + flip concatMapStrings cfg.provision.deleteApiTokens (apiToken: ''
          if id=$(
            ${influxCli} auth list --json --org ${escapeShellArg apiToken.org} 2>/dev/null \
              | ${getExe pkgs.jq} -r '.[] | select(.description | contains("${apiToken.id}")) | .id'
          ) && [[ -n "$id" ]]; then
            ${influxCli} auth delete --id "$id" >/dev/null
            echo "Deleted api token id="${escapeShellArg apiToken.id}
          fi
        '')
        + flip concatMapStrings cfg.provision.deleteReplications (replication: ''
          if id=$(
            ${influxCli} replication list --json --org ${escapeShellArg replication.org} --name ${escapeShellArg replication.name} 2>/dev/null \
              | ${getExe pkgs.jq} -r ".[0].id"
          ); then
            ${influxCli} replication delete --id "$id" >/dev/null
            echo "Deleted replication org="${escapeShellArg replication.org}" name="${escapeShellArg replication.name}
          fi
        '')
        + flip concatMapStrings cfg.provision.deleteRemotes (remote: ''
          if id=$(
            ${influxCli} remote list --json --org ${escapeShellArg remote.org} --name ${escapeShellArg remote.name} 2>/dev/null \
              | ${getExe pkgs.jq} -r ".[0].id"
          ); then
            ${influxCli} remote delete --id "$id" >/dev/null
            echo "Deleted remote org="${escapeShellArg remote.org}" name="${escapeShellArg remote.name}
          fi
        '')
        + flip concatMapStrings cfg.provision.deleteUsers (user: ''
          if id=$(
            ${influxCli} user list --json --name ${escapeShellArg user} 2>/dev/null \
              | ${getExe pkgs.jq} -r ".[0].id"
          ); then
            ${influxCli} user delete --id "$id" >/dev/null
            echo "Deleted user name="${escapeShellArg user}
          fi
        '')
        + flip concatMapStrings cfg.provision.deleteBuckets (bucket: ''
          if id=$(
            ${influxCli} bucket list --json --org ${escapeShellArg bucket.org} --name ${escapeShellArg bucket.name} 2>/dev/null \
              | ${getExe pkgs.jq} -r ".[0].id"
          ); then
            ${influxCli} bucket delete --id "$id" >/dev/null
            echo "Deleted bucket org="${escapeShellArg bucket.org}" name="${escapeShellArg bucket.name}
          fi
        '')
        + flip concatMapStrings cfg.provision.deleteOrganizations (org: ''
          if id=$(
            ${influxCli} org list --json --name ${escapeShellArg org} 2>/dev/null \
              | ${getExe pkgs.jq} -r ".[0].id"
          ); then
            ${influxCli} org delete --id "$id" >/dev/null
            echo "Deleted org name="${escapeShellArg org}
          fi
        '')
        + flip concatMapStrings cfg.provision.ensureOrganizations (org: let
          listArgs = [
            "--name"
            org.name
          ];
          updateArgs = optionals (org.description != null) [
            "--description"
            org.description
          ];
          createArgs = listArgs ++ updateArgs;
        in ''
          if id=$(
            ${influxCli} org list --json ${escapeShellArgs listArgs} 2>/dev/null \
              | ${getExe pkgs.jq} -r ".[0].id"
          ); then
            ${influxCli} org update --id "$id" ${escapeShellArgs updateArgs} >/dev/null
          else
            ${influxCli} org create ${escapeShellArgs createArgs} >/dev/null
            echo "Created org name="${escapeShellArg org.name}
          fi
        '')
        + flip concatMapStrings cfg.provision.ensureBuckets (bucket: let
          listArgs = [
            "--org"
            bucket.org
            "--name"
            bucket.name
          ];
          updateArgs =
            [
              "--retention"
              bucket.retention
            ]
            ++ optionals (bucket.description != null) [
              "--description"
              bucket.description
            ];
          createArgs = listArgs ++ updateArgs;
        in ''
          if id=$(
            ${influxCli} bucket list --json ${escapeShellArgs listArgs} 2>/dev/null \
              | ${getExe pkgs.jq} -r ".[0].id"
          ); then
            ${influxCli} bucket update --id "$id" ${escapeShellArgs updateArgs} >/dev/null
          else
            ${influxCli} bucket create ${escapeShellArgs createArgs} >/dev/null
            echo "Created bucket org="${escapeShellArg bucket.org}" name="${escapeShellArg bucket.name}
          fi
        '')
        + flip concatMapStrings cfg.provision.ensureUsers (user: let
          listArgs = [
            "--name"
            user.name
          ];
          createArgs =
            listArgs
            ++ [
              "--org"
              user.org
            ];
        in
          ''
            if id=$(
              ${influxCli} user list --json ${escapeShellArgs listArgs} 2>/dev/null \
                | ${getExe pkgs.jq} -r ".[0].id"
            ); then
              true # No updateable args
            else
              ${influxCli} user create ${escapeShellArgs createArgs} >/dev/null
              echo "Created user name="${escapeShellArg user.name}
            fi
          ''
          + optionalString (user.passwordFile != null) ''
            ${influxCli} user password ${escapeShellArgs listArgs} \
              --password "$(< ${escapeShellArg user.passwordFile})" >/dev/null
          '')
        + flip concatMapStrings cfg.provision.ensureRemotes (remote: let
          listArgs = [
            "--name"
            remote.name
            "--org"
            remote.org
          ];
          updateArgs =
            [
              "--remote-url"
              remote.remoteUrl
            ]
            ++ optionals (remote.remoteOrgId != null) [
              "--remote-org-id"
              remote.remoteOrgId
            ]
            ++ optionals (remote.description != null) [
              "--description"
              remote.description
            ];
          createArgs = listArgs ++ updateArgs;
        in ''
          if id=$(
            ${influxCli} remote list --json ${escapeShellArgs listArgs} 2>/dev/null \
              | ${getExe pkgs.jq} -r ".[0].id"
          ); then
            ${influxCli} remote update --id "$id" ${escapeShellArgs updateArgs} >/dev/null \
              --remote-api-token "$(< ${escapeShellArg remote.remoteTokenFile})"
          else
            extraArgs=()
            ${optionalString (remote.remoteOrg != null) ''
              remote_org_id=$(
                ${influxCli} org list --json \
                  --host ${escapeShellArg remote.remoteUrl} \
                  --token "$(< ${escapeShellArg remote.remoteTokenFile})" \
                  --name ${escapeShellArg remote.remoteOrg} \
                  | ${getExe pkgs.jq} -r ".[0].id"
              )
              extraArgs+=("--remote-org-id" "$remote_org_id")
            ''}
            ${influxCli} remote create ${escapeShellArgs createArgs} >/dev/null \
              --remote-api-token "$(< ${escapeShellArg remote.remoteTokenFile})" \
              "''${extraArgs[@]}"
            echo "Created remote org="${escapeShellArg remote.org}" name="${escapeShellArg remote.name}
          fi
        '')
        + flip concatMapStrings cfg.provision.ensureReplications (replication: let
          listArgs = [
            "--name"
            replication.name
            "--org"
            replication.org
          ];
          createArgs =
            listArgs
            ++ [
              "--remote-bucket"
              replication.remoteBucket
            ];
        in ''
          if id=$(
            ${influxCli} replication list --json ${escapeShellArgs listArgs} 2>/dev/null \
              | ${getExe pkgs.jq} -r ".[0].id"
          ); then
            true # No updateable args
          else
            remote_id=$(
              ${influxCli} remote list --json --org ${escapeShellArg replication.org} --name ${escapeShellArg replication.remote} \
                | ${getExe pkgs.jq} -r ".[0].id"
            )
            local_bucket_id=$(
              ${influxCli} bucket list --json --org ${escapeShellArg replication.org} --name ${escapeShellArg replication.localBucket} \
                | ${getExe pkgs.jq} -r ".[0].id"
            )
            ${influxCli} replication create ${escapeShellArgs createArgs} >/dev/null \
              --remote-id "$remote_id" \
              --local-bucket-id "$local_bucket_id"
            echo "Created replication org="${escapeShellArg replication.org}" name="${escapeShellArg replication.name}
          fi
        '')
        + flip concatMapStrings cfg.provision.ensureApiTokens (apiToken: let
          listArgs = [
            "--user"
            apiToken.user
            "--org"
            apiToken.org
          ];
          createArgs =
            listArgs
            ++ [
              "--description"
              ("${apiToken.name} - " + optionalString (apiToken.description != null) "${apiToken.description} - " + apiToken.id)
            ]
            ++ optional apiToken.operator "--operator"
            ++ optional apiToken.allAccess "--all-access"
            ++ map (x: "--read-${x}") apiToken.readPermissions
            ++ map (x: "--write-${x}") apiToken.writePermissions;
        in ''
          if id=$(
            ${influxCli} auth list --json --org ${escapeShellArg apiToken.org} 2>/dev/null \
              | ${getExe pkgs.jq} -r '.[] | select(.description | contains("${apiToken.id}")) | .id'
          ) && [[ -n "$id" ]]; then
            true # No updateable args
          else
            declare -A bucketIds
            ${flip concatMapStrings (unique (apiToken.readBuckets ++ apiToken.writeBuckets)) (bucket: ''
              bucketIds[${escapeShellArg bucket}]=$(
                ${influxCli} bucket list --json --org ${escapeShellArg apiToken.org} --name ${escapeShellArg bucket} \
                  | ${getExe pkgs.jq} -r ".[0].id"
              )
            '')}
            extraArgs=(
              ${flip concatMapStrings apiToken.readBuckets (bucket: ''
                "--read-bucket" "''${bucketIds[${escapeShellArg bucket}]}"
              '')}
              ${flip concatMapStrings apiToken.writeBuckets (bucket: ''
                "--write-bucket" "''${bucketIds[${escapeShellArg bucket}]}"
              '')}
            )
            ${influxCli} auth create ${escapeShellArgs createArgs} >/dev/null "''${extraArgs[@]}"
            any_tokens_created=1
            echo "Created api token org="${escapeShellArg apiToken.org}" user="${escapeShellArg apiToken.user}
          fi
        '')
        + ''
          if [[ $any_tokens_created == 1 ]]; then
            echo "Created new tokens, forcing service restart so we can manipulate secrets"
            exit 75 # TEMPFAIL
          fi
        '';
    };
  };
}
modules/meta/influxdb2.nix (new file, 1067 lines)
File diff suppressed because it is too large.
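The diff of the new modules/meta/influxdb2.nix is not rendered above. For orientation, here is a configuration sketch written against the provisioning interface of the removed module; the organization, bucket, user and secret-path names are hypothetical, and the replacement module may expose a slightly different interface:

services.influxdb2 = {
  enable = true;
  provision = {
    enable = true;
    initialSetup = {
      organization = "main";
      bucket = "default";
      passwordFile = "/run/secrets/influxdb-admin-password";
      tokenFile = "/run/secrets/influxdb-admin-token";
    };
    ensureOrganizations = [{name = "machines";}];
    ensureBuckets = [{
      name = "telegraf";
      org = "machines";
      retention = "30d";
    }];
    ensureUsers = [{
      name = "telegraf";
      org = "machines";
      passwordFile = null;
    }];
    ensureApiTokens = [{
      # The stable id appended to the token description is
      # substring 0 32 (hashString "sha256" "telegraf:machines:telegraf-write").
      name = "telegraf-write";
      user = "telegraf";
      org = "machines";
      writeBuckets = ["telegraf"];
      tokenFile = "/run/secrets/influxdb-telegraf-token";
    }];
  };
};

With a layout like this the generated postStart script creates any missing organizations, buckets, users and tokens after the server comes up, and skips entries that already exist.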