# NixOS module wiring up a single-host monitoring stack:
#   Grafana (UI) <- Prometheus (metrics) + Loki (logs) <- Alloy (log shipper).
# All listen ports are injected via the `monitoring.*` options below.
{ config, lib, pkgs, ... }:

let
  cfg = config.monitoring;
in
{
  options.monitoring = {
    # TCP ports for the individual services. All are required (no defaults).
    grafanaUiPort = lib.mkOption { type = lib.types.int; };
    lokiPort = lib.mkOption { type = lib.types.int; };
    alloyUiPort = lib.mkOption { type = lib.types.int; };
    prometheusNodeExporterPort = lib.mkOption { type = lib.types.int; };
    prometheusSqlExporterPort = lib.mkOption { type = lib.types.int; };
  };

  imports = [ ];

  config = {
    services.grafana = {
      enable = true;
      settings = {
        server = {
          # Listens on all interfaces; only the port is configurable here.
          http_addr = "0.0.0.0";
          http_port = cfg.grafanaUiPort;
        };
      };
      provision = {
        enable = true;
        # Point Grafana at the Prometheus and Loki instances defined below.
        datasources.settings.datasources = [
          {
            name = "Prometheus";
            type = "prometheus";
            url = "http://${config.services.prometheus.listenAddress}:${toString config.services.prometheus.port}";
          }
          {
            name = "Loki";
            type = "loki";
            access = "proxy";
            url = "http://127.0.0.1:${toString cfg.lokiPort}";
          }
        ];
      };
    };

    services.prometheus = {
      enable = true;
      scrapeConfigs = [
        {
          job_name = "node";
          static_configs = [{ targets = [ "localhost:${toString cfg.prometheusNodeExporterPort}" ]; }];
        }
        {
          job_name = "sql";
          static_configs = [{ targets = [ "localhost:${toString cfg.prometheusSqlExporterPort}" ]; }];
        }
      ];
      exporters.node = {
        enable = true;
        enabledCollectors = [ "systemd" ];
        port = cfg.prometheusNodeExporterPort;
      };
    };

    services.prometheus-sql-exporter = {
      enable = true;
      port = cfg.prometheusSqlExporterPort;
      config = {
        target = {
          # This URL should be postgresql:///postgres?host=/run/postgresql
          # but sql_exporter uses xo/dburl which isn't spec-compliant:
          # https://github.com/xo/dburl/issues/46
          data_source_name = "postgresql:/run/postgresql:/postgres";
          collectors = [ "db-sizes" ];
        };
        collectors = [
          {
            collector_name = "db-sizes";
            metrics = [
              {
                metric_name = "pg_db_size_bytes";
                help = "disk space used by the database";
                type = "gauge";
                key_labels = [ "database_name" ];
                values = [ "size" ];
                query = "SELECT datname AS database_name, pg_database_size(datname) as size from pg_database";
              }
            ];
          }
        ];
      };
    };

    services.loki = {
      enable = true;
      configuration = {
        server.http_listen_port = cfg.lokiPort;
        auth_enabled = false;
        ingester = {
          lifecycler = {
            address = "127.0.0.1";
            # Single-node setup: in-memory ring, no replication.
            ring = {
              kvstore.store = "inmemory";
              replication_factor = 1;
            };
          };
        };
        schema_config = {
          configs = [{
            store = "tsdb";
            object_store = "filesystem";
            schema = "v13";
            index = {
              prefix = "index_";
              period = "24h";
            };
          }];
        };
        storage_config = {
          tsdb_shipper = {
            active_index_directory = "/var/lib/loki/tsdb-active";
            cache_location = "/var/lib/loki/tsdb-cache";
          };
        };
        compactor = {
          working_directory = "/var/lib/loki";
        };
        limits_config = {
          allow_structured_metadata = true;
        };
      };
    };

    # Alloy needs group membership to read the journal and nginx access logs.
    systemd.services.alloy = {
      serviceConfig = {
        SupplementaryGroups = [ "systemd-journal" "www-data" ];
      };
    };

    services.alloy = {
      enable = true;
      extraFlags = [ "--server.http.listen-addr=0.0.0.0:${toString cfg.alloyUiPort}" ];
      # TODO: submit PR to nixpkgs so that the alloy config can be specified as a JSON expression
      configPath = pkgs.writeText "config.alloy" ''
        loki.source.journal "journal" {
          max_age       = "12h0m0s"
          relabel_rules = discovery.relabel.journal.rules
          forward_to    = [loki.process.journal.receiver]
          labels        = {
            // Label logs with this machine's hostname (previously hard-coded).
            host = "${config.networking.hostName}",
            job  = "systemd-journal",
          }
        }

        loki.process "journal" {
          forward_to = [loki.write.default.receiver]

          stage.match {
            // Select messages from systemd services that have LogExtraFields=log_format=logfmt.
            selector = "{__journal_log_format=\"logfmt\"}"
            stage.logfmt {
              mapping = { time = "", level = "" }
            }
            stage.timestamp {
              source = "time"
              format = "RFC3339"
            }
            stage.structured_metadata {
              values = { level = "" }
            }
          }
        }

        discovery.relabel "journal" {
          targets = []
          rule {
            source_labels = ["__journal__systemd_unit"]
            target_label  = "unit"
          }
        }

        loki.source.file "nginx_access" {
          targets    = local.file_match.nginx_access.targets
          forward_to = [loki.process.nginx_access.receiver]
        }

        local.file_match "nginx_access" {
          path_targets = [{
            __path__ = "/var/log/nginx/*.access.log",
          }]
        }

        loki.process "nginx_access" {
          forward_to = [loki.write.default.receiver]

          stage.static_labels {
            values = {
              job = "nginx",
            }
          }

          // Extracting the log file name as vhost because it's more convenient
          // to query for than the full filename. We could also use server_name
          // but there could be wildcard server_names and Loki labels should have
          // a low cardinality for performance reasons.
          stage.regex {
            source     = "filename"
            // Named capture group is required so stage.labels can pick up "vhost".
            expression = "(?P<vhost>[^/]+)\\.access\\.log$"
          }
          stage.labels {
            values = {
              vhost = "",
            }
          }

          stage.json {
            expressions = { "msec" = "", path = "" }
          }
          stage.timestamp {
            source = "msec"
            format = "Unix"
          }

          // Setting level=info to prevent Loki's log level detection from wrongly
          // detecting messages with paths containing "error" as errors.
          // Creating the filetype entry via stage.template because there's no
          // static_structured_metadata stage yet. (https://github.com/grafana/loki/issues/16703)
          stage.template {
            source   = "level"
            template = "info"
          }
          stage.structured_metadata {
            values = { level = "" }
          }

          stage.labels {
            values = {
              // Temporarily adding path as a label so that we can use it in the match selectors.
              path = "",
            }
          }

          stage.match {
            selector = "{path=~\"/\\\\.well-known/.*\"}"
            // Creating the filetype entry via stage.template because there's no
            // static_structured_metadata stage yet.
            // (https://github.com/grafana/loki/issues/16703)
            stage.template {
              source   = "filetype"
              template = "well-known"
            }
          }
          stage.match {
            selector = "{path=\"/robots.txt\"}"
            stage.template {
              source   = "filetype"
              template = "robots.txt"
            }
          }
          stage.match {
            selector = "{path=~\".*\\\\.atom$\"}"
            stage.template {
              source   = "filetype"
              template = "feed"
            }
          }
          stage.structured_metadata {
            values = {
              filetype = "",
            }
          }

          // Dropping path again because it has a too high cardinality for a label.
          stage.label_drop {
            values = [ "path" ]
          }
        }

        loki.write "default" {
          endpoint {
            url = "http://127.0.0.1:${toString cfg.lokiPort}/loki/api/v1/push"
          }
          external_labels = {}
        }
      '';
    };
  };
}