Diffstat (limited to 'nixos/shared')
-rw-r--r--  nixos/shared/alloy-nix-config/alloy_nix_config.go  | 129
-rw-r--r--  nixos/shared/alloy-nix-config/default.nix           |   8
-rw-r--r--  nixos/shared/alloy-nix-config/go.mod                |   3
-rw-r--r--  nixos/shared/grafana-matrix-forwarder/default.nix   |  10
-rw-r--r--  nixos/shared/grafana-matrix-forwarder/service.nix   |  31
-rw-r--r--  nixos/shared/monitoring.nix                         | 231
-rw-r--r--  nixos/shared/prometheus-sql-exporter/default.nix    |  20
-rw-r--r--  nixos/shared/prometheus-sql-exporter/service.nix    |  13
-rw-r--r--  nixos/shared/vpn.nix                                |   8
9 files changed, 433 insertions(+), 20 deletions(-)
diff --git a/nixos/shared/alloy-nix-config/alloy_nix_config.go b/nixos/shared/alloy-nix-config/alloy_nix_config.go
new file mode 100644
index 0000000..4b6eb63
--- /dev/null
+++ b/nixos/shared/alloy-nix-config/alloy_nix_config.go
@@ -0,0 +1,129 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "maps"
+ "os"
+ "slices"
+ "strconv"
+ "strings"
+)
+
+func main() {
+ if len(os.Args) != 3 {
+ fmt.Fprintf(os.Stderr, "usage: %s <json_path> <out_path>\n", os.Args[0])
+ os.Exit(1)
+ }
+
+ jsonPath := os.Args[1]
+ outPath := os.Args[2]
+
+ jsonData, err := os.ReadFile(jsonPath)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "error reading file %s: %v\n", jsonPath, err)
+ os.Exit(1)
+ }
+
+ // It would be nice to preserve the order of blocks ... except that we can't
+ // because Nix already doesn't preserve the order of attribute sets.
+ var config map[string]any
+ if err := json.Unmarshal(jsonData, &config); err != nil {
+ fmt.Fprintf(os.Stderr, "error parsing JSON: %v\n", err)
+ os.Exit(1)
+ }
+
+ result := formatConfig(config)
+
+ if err := os.WriteFile(outPath, []byte(result), 0644); err != nil {
+ fmt.Fprintf(os.Stderr, "error writing file %s: %v\n", outPath, err)
+ os.Exit(1)
+ }
+}
+
+func formatConfig(config map[string]any) string {
+ var s strings.Builder
+
+ for _, blockName := range slices.Sorted(maps.Keys(config)) {
+ labels := config[blockName]
+
+ if labelsMap, ok := labels.(map[string]any); ok {
+ for label, block := range labelsMap {
+ if blockMap, ok := block.(map[string]any); ok {
+ s.WriteString(formatBlock(blockName, label, blockMap, 0))
+ }
+ }
+ }
+ }
+
+ return s.String()
+}
+
+func formatBlock(blockName string, label string, block map[string]any, indent int) string {
+ var s strings.Builder
+
+ s.WriteString(strings.Repeat(" ", indent))
+ s.WriteString(blockName)
+ if label != "" {
+ s.WriteString(fmt.Sprintf(` %s`, strconv.Quote(label)))
+ }
+ s.WriteString(" {\n")
+
+ var blocks []any
+ if blocksValue, exists := block["blocks"]; exists {
+ if blocksList, ok := blocksValue.([]any); ok {
+ blocks = blocksList
+ }
+ delete(block, "blocks")
+ }
+
+ for _, key := range slices.Sorted(maps.Keys(block)) {
+ s.WriteString(strings.Repeat(" ", indent+1))
+ s.WriteString(fmt.Sprintf("%s = %s\n", key, formatValue(block[key])))
+ }
+
+ for _, blockItem := range blocks {
+ if blockMap, ok := blockItem.(map[string]any); ok {
+ var name string
+ if nameValue, exists := blockMap["name"]; exists {
+ if nameStr, ok := nameValue.(string); ok {
+ name = nameStr
+ }
+ delete(blockMap, "name")
+ }
+
+ s.WriteString(formatBlock(name, "", blockMap, indent+1))
+ }
+ }
+
+ s.WriteString(strings.Repeat(" ", indent))
+ s.WriteString("}\n")
+
+ return s.String()
+}
+
+func formatValue(value any) string {
+ switch v := value.(type) {
+ case string:
+ return strconv.Quote(v)
+ case map[string]any:
+ if ref, exists := v["$ref"]; exists {
+ if refStr, ok := ref.(string); ok {
+ return refStr
+ }
+ }
+ var parts []string
+ for _, name := range slices.Sorted(maps.Keys(v)) {
+ parts = append(parts, fmt.Sprintf("%s=%s,", name, formatValue(v[name])))
+ }
+ return "{" + strings.Join(parts, " ") + "}"
+ case []any:
+ var parts []string
+ for _, item := range v {
+ parts = append(parts, formatValue(item))
+ }
+ return "[" + strings.Join(parts, ", ") + "]"
+ default:
+ return fmt.Sprintf("%v", v)
+ }
+}
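
To make the JSON-to-Alloy translation concrete, here is a minimal sketch of the kind of attribute set this generator consumes, together with the text it renders. It assumes that helpers.writeAlloyConfig (defined in helpers.nix, which is not part of this diff) serializes the attrset with builtins.toJSON before invoking the program; the loki.write example mirrors the block used in monitoring.nix further down, with 3100 standing in for the configured Loki port.

{
  "loki.write".default = {
    external_labels = {};
    blocks = [
      { name = "endpoint"; url = "http://127.0.0.1:3100/loki/api/v1/push"; }
    ];
  };
}

# Rendered Alloy text (attributes are emitted before nested blocks, sorted by
# key, with one space per indentation level):
#
# loki.write "default" {
#  external_labels = {}
#  endpoint {
#   url = "http://127.0.0.1:3100/loki/api/v1/push"
#  }
# }
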
diff --git a/nixos/shared/alloy-nix-config/default.nix b/nixos/shared/alloy-nix-config/default.nix
new file mode 100644
index 0000000..d4efe02
--- /dev/null
+++ b/nixos/shared/alloy-nix-config/default.nix
@@ -0,0 +1,8 @@
+{ pkgs ? import <nixpkgs> {} }:
+
+pkgs.buildGoModule {
+ pname = "alloy-nix-config";
+ version = "git";
+ src = ./.;
+ vendorHash = null;
+}
diff --git a/nixos/shared/alloy-nix-config/go.mod b/nixos/shared/alloy-nix-config/go.mod
new file mode 100644
index 0000000..2916089
--- /dev/null
+++ b/nixos/shared/alloy-nix-config/go.mod
@@ -0,0 +1,3 @@
+module push-f.com/alloy-nix-config
+
+go 1.24.5
diff --git a/nixos/shared/grafana-matrix-forwarder/default.nix b/nixos/shared/grafana-matrix-forwarder/default.nix
new file mode 100644
index 0000000..7a04dcb
--- /dev/null
+++ b/nixos/shared/grafana-matrix-forwarder/default.nix
@@ -0,0 +1,10 @@
+{ buildGoModule }:
+let
+ sources = import <top/npins>;
+in
+buildGoModule {
+ pname = "grafana-matrix-forwarder";
+ version = sources.grafana-matrix-forwarder.version;
+ src = sources.grafana-matrix-forwarder;
+ vendorHash = "sha256-ifkeakyRkIF2Y/4otUWhTvUzsPwRb1Wxx6gqN0806c4=";
+}
diff --git a/nixos/shared/grafana-matrix-forwarder/service.nix b/nixos/shared/grafana-matrix-forwarder/service.nix
new file mode 100644
index 0000000..5ad511c
--- /dev/null
+++ b/nixos/shared/grafana-matrix-forwarder/service.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, ... }:
+
+let
+ grafanaMatrixForwarder = pkgs.callPackage ./default.nix {};
+ cfg = config.services.grafana-matrix-forwarder;
+in
+{
+ options.services.grafana-matrix-forwarder = {
+ enable = lib.mkEnableOption "grafana-matrix-forwarder";
+ port = lib.mkOption {
+ type = lib.types.int;
+ };
+ homeserver = lib.mkOption {
+ type = lib.types.str;
+ };
+ environmentFile = lib.mkOption {
+ type = lib.types.path;
+ };
+ };
+
+ config = lib.mkIf cfg.enable {
+ systemd.services.grafana-matrix-forwarder = {
+ serviceConfig = {
+ ExecStart = "${grafanaMatrixForwarder}/bin/grafana-matrix-forwarder --port=${toString cfg.port} --homeserver ${cfg.homeserver}";
+ EnvironmentFile = cfg.environmentFile;
+ DynamicUser = "true";
+ };
+ wantedBy = ["multi-user.target"];
+ };
+ };
+}
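
For reference, a host would consume this module roughly as follows; the import path, port, homeserver, and secret are hypothetical placeholders, and the environment file is assumed to hold the Matrix credentials the forwarder reads at startup.

{ config, ... }:
{
  imports = [ ./shared/grafana-matrix-forwarder/service.nix ];  # hypothetical path

  services.grafana-matrix-forwarder = {
    enable = true;
    port = 9088;                                # hypothetical port
    homeserver = "https://matrix.example.org";  # hypothetical homeserver
    # Hypothetical agenix secret providing the forwarder's Matrix credentials.
    environmentFile = config.age.secrets.grafana-matrix-forwarder-env.path;
  };
}
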
diff --git a/nixos/shared/monitoring.nix b/nixos/shared/monitoring.nix
new file mode 100644
index 0000000..8711630
--- /dev/null
+++ b/nixos/shared/monitoring.nix
@@ -0,0 +1,231 @@
+{ config, lib, pkgs, ... }:
+
+let
+ cfg = config.monitoring;
+ helpers = import <top/helpers.nix> { inherit config lib pkgs; };
+in
+{
+ options.monitoring = {
+ lokiPort = lib.mkOption {
+ type = lib.types.int;
+ };
+ alloyUiPort = lib.mkOption {
+ type = lib.types.int;
+ };
+ prometheusNodeExporterPort = lib.mkOption {
+ type = lib.types.int;
+ };
+ prometheusScrapeConfigs = lib.mkOption {
+ type = lib.types.listOf lib.types.attrs;
+ default = [];
+ };
+ };
+ config = {
+ services.prometheus = {
+ enable = true;
+
+ retentionTime = "1y";
+
+ scrapeConfigs = [
+ {
+ job_name = "node";
+ static_configs = [{
+ targets = [ "localhost:${toString cfg.prometheusNodeExporterPort}" ];
+ }];
+ }
+ ] ++ cfg.prometheusScrapeConfigs;
+
+ exporters.node = {
+ enable = true;
+ enabledCollectors = [ "systemd" ];
+ port = cfg.prometheusNodeExporterPort;
+ };
+ };
+
+ services.loki = {
+ enable = true;
+ configuration = {
+ server.http_listen_port = cfg.lokiPort;
+ auth_enabled = false;
+
+ ingester = {
+ lifecycler = {
+ address = "127.0.0.1";
+ ring = {
+ kvstore.store = "inmemory";
+ replication_factor = 1;
+ };
+ };
+ };
+
+ schema_config = {
+ configs = [{
+ store = "tsdb";
+ object_store = "filesystem";
+ schema = "v13";
+ index = {
+ prefix = "index_";
+ period = "24h";
+ };
+ }];
+ };
+
+ storage_config = {
+ tsdb_shipper = {
+ active_index_directory = "/var/lib/loki/tsdb-active";
+ cache_location = "/var/lib/loki/tsdb-cache";
+ };
+ };
+
+ compactor = {
+ working_directory = "/var/lib/loki";
+ };
+
+ limits_config = {
+ allow_structured_metadata = true;
+ };
+ };
+ };
+
+ systemd.services.alloy = {
+ serviceConfig = {
+ SupplementaryGroups = [
+ "systemd-journal"
+ ] ++ lib.optional config.services.nginx.enable config.services.nginx.group;
+ };
+ };
+
+ services.alloy = {
+ enable = true;
+ extraFlags = ["--server.http.listen-addr=0.0.0.0:${toString cfg.alloyUiPort}"];
+ configPath =
+ let
+ ref = helpers.alloyConfigRef;
+ in
+ helpers.writeAlloyConfig {
+ "loki.source.journal".journal = {
+ max_age = "12h0m0s";
+ relabel_rules = ref "discovery.relabel.journal.rules";
+ forward_to = [(ref "loki.process.journal.receiver")];
+ labels = {
+ host = "tente";
+ job = "systemd-journal";
+ };
+ };
+ "loki.process".journal = {
+ forward_to = [(ref "loki.write.default.receiver")];
+ blocks = [
+ {
+ name = "stage.match";
+ # Select messages from systemd services that have LogExtraFields=LOG_FORMAT=logfmt.
+ selector = ''{__journal_LOG_FORMAT="logfmt"}'';
+ blocks = [
+ { name = "stage.logfmt"; mapping = { time = ""; level = ""; }; }
+ { name = "stage.timestamp"; source = "time"; format = "RFC3339"; }
+ {
+ # The slog package of the Go standard library prints levels as uppercase.
+ name = "stage.template";
+ source = "level";
+ template = "{{ ToLower .Value }}";
+ }
+ { name = "stage.structured_metadata"; values = { level = ""; }; }
+ ];
+ }
+ ];
+ };
+ "discovery.relabel".journal = {
+ targets = [];
+ blocks = [
+ {
+ name = "rule";
+ source_labels = ["__journal__systemd_unit"];
+ target_label = "unit";
+ }
+ ];
+ };
+
+ "loki.source.file".nginx_access = {
+ targets = ref "local.file_match.nginx_access.targets";
+ forward_to = [(ref "loki.process.nginx_access.receiver")];
+ };
+ "local.file_match".nginx_access = {
+ path_targets = [{
+ __path__ = "/var/log/nginx/*.access.log";
+ }];
+ };
+ "loki.process".nginx_access = {
+ forward_to = [(ref "loki.write.default.receiver")];
+ blocks = [
+ { name = "stage.static_labels"; values = { job = "nginx"; }; }
+
+ {
+ # Extracting the log file name as vhost because it's more convenient
+ # to query for than the full filename. We could also use server_name,
+ # but server_names can contain wildcards, and Loki labels should have
+ # low cardinality for performance reasons.
+ name = "stage.regex";
+ source = "filename";
+ expression = "(?P<vhost>[^/]+)\\.access\\.log$";
+ }
+
+ { name = "stage.labels"; values = { vhost = ""; }; }
+ { name = "stage.json"; expressions = { msec = ""; path = ""; }; }
+ { name = "stage.timestamp"; source = "msec"; format = "Unix"; }
+ {
+ # Setting level=info to prevent Loki's log level detection from wrongly
+ # detecting messages with paths containing "error" as errors.
+ # Creating the level entry via stage.template because there's no
+ # static_structured_metadata stage yet. (https://github.com/grafana/loki/issues/16703)
+ name = "stage.template";
+ source = "level";
+ template = "info";
+ }
+ { name = "stage.structured_metadata"; values = { level = ""; }; }
+
+ # Temporarily adding path as a label so that we can use it in the match selectors.
+ { name = "stage.labels"; values = { path = ""; }; }
+ {
+ name = "stage.match";
+ selector = "{path=~\"/\\\\.well-known/.*\"}";
+ # Creating the filetype entry via stage.template because there's no
+ # static_structured_metadata stage yet. (https://github.com/grafana/loki/issues/16703)
+ blocks = [
+ { name = "stage.template"; source = "filetype"; template = "well-known"; }
+ ];
+ }
+ {
+ name = "stage.match";
+ selector = "{path=\"/robots.txt\"}";
+ blocks = [
+ { name = "stage.template"; source = "filetype"; template = "robots.txt"; }
+ ];
+ }
+ {
+ name = "stage.match";
+ selector = "{path=~\".*\\\\.atom$\"}";
+ blocks = [
+ { name = "stage.template"; source = "filetype"; template = "feed"; }
+ ];
+ }
+ {
+ name = "stage.structured_metadata";
+ values = { filetype = ""; };
+ }
+
+ # Dropping path again because its cardinality is too high for a label.
+ { name = "stage.label_drop"; values = ["path"]; }
+ ];
+ };
+ "loki.write".default = {
+ blocks = [
+ {
+ name = "endpoint";
+ url = "http://127.0.0.1:${toString cfg.lokiPort}/loki/api/v1/push";
+ }
+ ];
+ external_labels = {};
+ };
+ };
+ };
+ };
+}
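
A host importing this module only needs to pick the ports and, optionally, additional Prometheus scrape targets. A minimal sketch, with all paths and values as hypothetical placeholders:

{
  imports = [ ./shared/monitoring.nix ];  # hypothetical path

  monitoring = {
    lokiPort = 3100;                    # hypothetical
    alloyUiPort = 12345;                # hypothetical
    prometheusNodeExporterPort = 9100;  # hypothetical
    # Extra scrape jobs appended to the node exporter job defined above.
    prometheusScrapeConfigs = [
      {
        job_name = "sql";
        static_configs = [{ targets = [ "localhost:9399" ]; }];  # hypothetical target
      }
    ];
  };
}
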
diff --git a/nixos/shared/prometheus-sql-exporter/default.nix b/nixos/shared/prometheus-sql-exporter/default.nix
index 81f1660..5d80a62 100644
--- a/nixos/shared/prometheus-sql-exporter/default.nix
+++ b/nixos/shared/prometheus-sql-exporter/default.nix
@@ -1,21 +1,15 @@
{
lib,
buildGoModule,
- fetchFromGitHub,
}:
-
-buildGoModule rec {
+let
+ sources = import <top/npins>;
+in
+buildGoModule {
pname = "sql_exporter";
- version = "0.17.1";
-
- src = fetchFromGitHub {
- owner = "burningalchemist";
- repo = pname;
- rev = version;
- sha256 = "sha256-AEPFXPplHtny1P3gMvB1gbMj10bpu9PXc6ywliF+dCc=";
- };
-
- vendorHash = "sha256-KFWDqbdbXvgEtz1nlasWrvIckpzasUdzbb+AKfXmYf8=";
+ version = sources.prometheus-sql-exporter.version;
+ src = sources.prometheus-sql-exporter;
+ vendorHash = "sha256-eZxxmqoiXPdjZs/lwbzvWco9mDFy0zmpGDcqTIyWbK4=";
meta = with lib; {
description = "Database-agnostic SQL exporter for Prometheus";
diff --git a/nixos/shared/prometheus-sql-exporter/service.nix b/nixos/shared/prometheus-sql-exporter/service.nix
index a887f91..a79528c 100644
--- a/nixos/shared/prometheus-sql-exporter/service.nix
+++ b/nixos/shared/prometheus-sql-exporter/service.nix
@@ -4,7 +4,16 @@
let
sqlExporter = pkgs.callPackage ./default.nix {};
cfg = config.services.prometheus-sql-exporter;
- configFile = builtins.toFile "config.yaml" (builtins.toJSON cfg.config);
+ configFile = builtins.toFile "config.yaml" (builtins.toJSON cfg.config);
+ validateConfig = file:
+ pkgs.runCommand "validate-config"
+ {
+ nativeBuildInputs = [sqlExporter];
+ }
+ ''
+ sql_exporter -config.check -config.file "${file}"
+ ln -s "${file}" "$out"
+ '';
in
{
options.services.prometheus-sql-exporter = {
@@ -20,7 +29,7 @@ in
config = lib.mkIf cfg.enable {
systemd.services.prometheus-sql-exporter = {
serviceConfig = {
- ExecStart = "${sqlExporter}/bin/sql_exporter -config.file ${configFile} -web.listen-address :${toString cfg.port}";
+ ExecStart = "${sqlExporter}/bin/sql_exporter -config.file ${validateConfig configFile} -web.listen-address :${toString cfg.port}";
DynamicUser = "true";
User = "prometheus-sql-exporter";
};
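
The validateConfig wrapper introduced above runs sql_exporter -config.check against the generated config.yaml inside a runCommand derivation, so a broken collector definition now fails at nix build time instead of when the service starts. A hypothetical host-side usage, assuming the upstream sql_exporter config schema, might look like this:

{
  imports = [ ./shared/prometheus-sql-exporter/service.nix ];  # hypothetical path

  services.prometheus-sql-exporter = {
    enable = true;
    port = 9399;  # hypothetical
    # Hypothetical collector definition, assuming the upstream sql_exporter schema.
    config = {
      target = {
        data_source_name = "postgres://exporter@localhost/app?sslmode=disable";
        collectors = [ "app_stats" ];
      };
      collectors = [{
        collector_name = "app_stats";
        metrics = [{
          metric_name = "app_users_total";
          type = "gauge";
          help = "Number of user accounts.";
          values = [ "count" ];
          query = "SELECT COUNT(*) AS count FROM users;";
        }];
      }];
    };
  };
}
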
diff --git a/nixos/shared/vpn.nix b/nixos/shared/vpn.nix
index 59fb225..9cbcf45 100644
--- a/nixos/shared/vpn.nix
+++ b/nixos/shared/vpn.nix
@@ -2,7 +2,6 @@
{
age.secrets.vpn-se-privKey.file = ../secrets/vpn-se-privKey.age;
- age.secrets.vpn-se-presharedKey.file = ../secrets/vpn-se-presharedKey.age;
# We're creating the wireguard interfaces in network namespaces so that
# we can use them on demand:
@@ -14,15 +13,14 @@
interfaces.wg-se = {
interfaceNamespace = "se";
- ips = ["10.148.171.71/32"];
+ ips = ["10.128.241.130/32"];
privateKeyFile = config.age.secrets.vpn-se-privKey.path;
peers = [
{
- publicKey = "PyLCXAQT8KkM4T+dUsOQfn+Ub3pGxfGlxkIApuig+hk=";
- presharedKeyFile = config.age.secrets.vpn-se-presharedKey.path;
+ publicKey = "sb61ho9MhaxhJd6WSrryVmknq0r6oHEW7PP5i4lzAgM=";
allowedIPs = ["0.0.0.0/0"];
- endpoint = "se3.vpn.airdns.org:1637";
+ endpoint = "se.gw.xeovo.com:51820";
}
];
};