author     tv <tv@krebsco.de>  2020-10-03 13:44:30 +0200
committer  tv <tv@krebsco.de>  2020-10-03 13:44:30 +0200
commit     d1e52425e0d5d79a33b11c92cc2afb498075d953 (patch)
tree       29277982f014eaae680e006b6afc7fdb42e8d9b2 /krebs/2configs/shack
parent     654f64f05935a69607a540f2e8d15619cee9e15e (diff)
parent     7e7499d86302d261c8f8404fb34f2ac091318d0e (diff)
Merge remote-tracking branch 'prism/master'
Diffstat (limited to 'krebs/2configs/shack')
-rw-r--r--  krebs/2configs/shack/glados/default.nix                   |  25
-rw-r--r--  krebs/2configs/shack/glados/multi/rollos.nix              |  53
-rw-r--r--  krebs/2configs/shack/glados/multi/wasser.nix              |  12
-rw-r--r--  krebs/2configs/shack/influx.nix                           |   5
-rw-r--r--  krebs/2configs/shack/light.shack.nix                      |  12
-rw-r--r--  krebs/2configs/shack/muell_mail.nix                       |   4
-rw-r--r--  krebs/2configs/shack/node-light.nix                       |   3
-rw-r--r--  krebs/2configs/shack/prometheus/alert-rules.nix           | 140
-rw-r--r--  krebs/2configs/shack/prometheus/alertmanager-telegram.nix |  17
-rw-r--r--  krebs/2configs/shack/prometheus/server.nix                |  23
-rw-r--r--  krebs/2configs/shack/prometheus/templates/shack.tmpl      |  25
11 files changed, 185 insertions, 134 deletions
diff --git a/krebs/2configs/shack/glados/default.nix b/krebs/2configs/shack/glados/default.nix
index 9bf90bca4..f47bca2db 100644
--- a/krebs/2configs/shack/glados/default.nix
+++ b/krebs/2configs/shack/glados/default.nix
@@ -3,6 +3,7 @@ let
shackopen = import ./multi/shackopen.nix;
wasser = import ./multi/wasser.nix;
badair = import ./multi/schlechte_luft.nix;
+ rollos = import ./multi/rollos.nix;
in {
services.nginx.virtualHosts."hass.shack" = {
serverAliases = [ "glados.shack" ];
@@ -62,13 +63,18 @@ in {
];
};
# https://www.home-assistant.io/components/influxdb/
- #influxdb = {
- # database = "hass";
- # tags = {
- # instance = "wolf";
- # source = "hass";
- # };
- #};
+ influxdb = {
+ database = "glados";
+ host = "influx.shack";
+ component_config_glob = {
+ "sensor.*particulate_matter_2_5um_concentration".override_measurement = "2_5um particles";
+      "sensor.*particulate_matter_10_0um_concentration".override_measurement = "10um particles";
+ };
+ tags = {
+ instance = "wolf";
+ source = "glados";
+ };
+ };
esphome = {};
api = {};
mqtt = {
@@ -93,8 +99,7 @@ in {
};
};
switch =
- wasser.switch
- ++ (import ./switch/power.nix)
+ (import ./switch/power.nix)
;
light = [];
media_player = [
@@ -113,6 +118,7 @@ in {
++ (import ./sensors/mate.nix)
++ (import ./sensors/darksky.nix { inherit lib;})
++ shackopen.sensor
+ ++ wasser.sensor
;
air_quality = (import ./sensors/sensemap.nix );
@@ -147,6 +153,7 @@ in {
automation = wasser.automation
++ badair.automation
+ ++ rollos.automation
++ (import ./automation/shack-startup.nix)
++ (import ./automation/party-time.nix)
++ (import ./automation/hass-restart.nix);
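
Note on the new influxdb section: `component_config_glob` renames the matched particulate-matter sensors to readable measurement names in the new "glados" database on influx.shack. As a sketch only, the export could also be narrowed to selected domains or entities; the `include` attribute below is an option of the upstream Home Assistant influxdb integration and is not part of this commit:

    # Sketch, not committed code: optional entity filtering for the influxdb export.
    influxdb = {
      database = "glados";
      host = "influx.shack";
      include = {
        domains = [ "sensor" ];       # only export sensor entities
        entities = [ "sun.sun" ];     # plus individually listed entities
      };
      tags = { instance = "wolf"; source = "glados"; };
    };
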
diff --git a/krebs/2configs/shack/glados/multi/rollos.nix b/krebs/2configs/shack/glados/multi/rollos.nix
index 1febad525..4e6494936 100644
--- a/krebs/2configs/shack/glados/multi/rollos.nix
+++ b/krebs/2configs/shack/glados/multi/rollos.nix
@@ -1,13 +1,56 @@
+#
+
let
glados = import ../lib;
+ tempsensor = "sensor.dark_sky_temperature";
+ all_covers = [
+ "cover.crafting_rollo"
+ "cover.elab_rollo"
+ "cover.or2_rollo"
+ "cover.retroraum_rollo"
+ ];
in
{
- # LED
- light = [
- ];
- sensor = [
- ];
automation =
[
+ { alias = "Rollos fahren Runter";
+ trigger = [
+ {
+ platform = "numeric_state";
+ entity_id = tempsensor;
+ above = 25;
+ for = "00:30:00";
+ }
+ ];
+ condition =
+ [
+ {
+ condition = "state";
+ entity_id = "sun.sun";
+ state = "above_horizon";
+ }
+ ];
+ action =
+ [
+ { service = "cover.close_cover";
+ entity_id = all_covers;
+ }
+ ];
+ }
+ { alias = "Rollos fahren Hoch";
+ trigger = [
+ {
+ platform = "sun";
+ event = "sunset";
+ }
+ ];
+ condition = [ ];
+ action =
+ [
+ { service = "cover.open_cover";
+ entity_id = all_covers;
+ }
+ ];
+ }
];
}
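
Both automations drive the same `all_covers` list: the covers close only after `sensor.dark_sky_temperature` has stayed above 25 °C for 30 minutes while the sun is up, and open again at sunset. The list could also back a manual override; a minimal sketch, assuming a `script` section were wired into the glados configuration the same way `automation` is (the name `rollos_runter` is hypothetical):

    # Sketch only: manual "close all covers" script reusing all_covers.
    script.rollos_runter = {
      alias = "Close all covers";
      sequence = [
        { service = "cover.close_cover";
          entity_id = all_covers;
        }
      ];
    };
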
diff --git a/krebs/2configs/shack/glados/multi/wasser.nix b/krebs/2configs/shack/glados/multi/wasser.nix
index 6f3dc98ad..74ce736a6 100644
--- a/krebs/2configs/shack/glados/multi/wasser.nix
+++ b/krebs/2configs/shack/glados/multi/wasser.nix
@@ -2,13 +2,17 @@
# switch.crafting_giesskanne_relay
let
glados = import ../lib;
- seconds = 10;
+ seconds = 20;
wasser = "switch.crafting_giesskanne_relay";
in
{
- switch = [
- (glados.tasmota.plug { host = "Wasser"; topic = "plug";} )
- ];
+ sensor = map ( entity_id: {
+ platform = "statistics";
+ name = "Statistics for ${entity_id}";
+ inherit entity_id;
+ max_age.minutes = "60";
+ }) ["sensor.crafting_brotbox_soil_moisture"];
+
automation =
[
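
For reference, the `map` above expands to exactly one statistics platform entry; evaluated, the new sensor list is roughly:

    # Result of evaluating the sensor attribute above (sketch of the value).
    [
      {
        platform = "statistics";
        name = "Statistics for sensor.crafting_brotbox_soil_moisture";
        entity_id = "sensor.crafting_brotbox_soil_moisture";
        max_age.minutes = "60";
      }
    ]
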
diff --git a/krebs/2configs/shack/influx.nix b/krebs/2configs/shack/influx.nix
index 92cb24bf3..93d83a59b 100644
--- a/krebs/2configs/shack/influx.nix
+++ b/krebs/2configs/shack/influx.nix
@@ -8,6 +8,11 @@ in
networking.firewall.allowedTCPPorts = [ port ]; # for legacy applications
networking.firewall.allowedUDPPorts = [ collectd-port ];
services.nginx.virtualHosts."influx.shack" = {
+ # Disable constant GET request logging.
+ # $loggable map is defined in 1/wolf
+ extraConfig = ''
+ access_log syslog:server=unix:/dev/log combined if=$loggable;
+ '';
locations."/" = {
proxyPass = "http://localhost:${toString port}/";
};
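
The `$loggable` map itself is defined in 1/wolf and is not part of this diff. For illustration only, such a map is typically declared once in the http context and drops the constant GET polling from the access log; a sketch of the assumed shape (the real definition may differ):

    # Sketch only: assumed shape of the $loggable map (actual definition lives in 1/wolf).
    services.nginx.appendHttpConfig = ''
      map $request_method $loggable {
        default  1;   # keep logging everything else
        GET      0;   # drop the constant GET polling from the access log
      }
    '';
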
diff --git a/krebs/2configs/shack/light.shack.nix b/krebs/2configs/shack/light.shack.nix
new file mode 100644
index 000000000..8e01cb1bf
--- /dev/null
+++ b/krebs/2configs/shack/light.shack.nix
@@ -0,0 +1,12 @@
+{ config, pkgs, ... }:
+let
+ light-shack-src = pkgs.fetchgit {
+ url = "https://git.shackspace.de/rz/standby.shack";
+ rev = "e1b90a0a";
+ sha256 = "07fmz63arc5rxa0a3778srwz0jflp4ad6xnwkkc56hwybby0bclh";
+ };
+ web-dir = "${light-shack-src}/client/www/";
+in
+{
+ services.nginx.virtualHosts."light.shack".locations."/".root = web-dir;
+}
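
When standby.shack moves, the pinned `rev`/`sha256` pair can be refreshed with nix-prefetch-git, e.g. `nix-prefetch-git https://git.shackspace.de/rz/standby.shack --rev <new-rev>` prints the matching sha256 (shown as a hint; any prefetch tool works).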
diff --git a/krebs/2configs/shack/muell_mail.nix b/krebs/2configs/shack/muell_mail.nix
index 409278954..481564719 100644
--- a/krebs/2configs/shack/muell_mail.nix
+++ b/krebs/2configs/shack/muell_mail.nix
@@ -4,8 +4,8 @@ let
pkg = pkgs.callPackage (
pkgs.fetchgit {
url = "https://git.shackspace.de/rz/muell_mail";
- rev = "57b67c95052d90044137b2c89007a371dc389afd";
- sha256 = "1grkzs6fxjnc2bv4kskj63d5sb4qxz6yyr85nj0da9hn7qkk4jkj";
+ rev = "c3e43687879f95e01a82ef176fa15678543b2eb8";
+ sha256 = "0hgchwam5ma96s2v6mx2jfkh833psadmisjbm3k3153rlxp46frx";
}) { mkYarnPackage = pkgs.yarn2nix-moretea.mkYarnPackage; };
home = "/var/lib/muell_mail";
cfg = toString <secrets/shack/muell_mail.js>;
diff --git a/krebs/2configs/shack/node-light.nix b/krebs/2configs/shack/node-light.nix
index b471f2af5..4a981ea87 100644
--- a/krebs/2configs/shack/node-light.nix
+++ b/krebs/2configs/shack/node-light.nix
@@ -28,6 +28,9 @@ in {
};
services.nginx.virtualHosts."openhab.shack" = {
+ extraConfig = ''
+ access_log syslog:server=unix:/dev/log combined if=$loggable;
+ '';
serverAliases = [ "lightapi.shack" ];
locations."/power/".proxyPass = "http://localhost:${port}/power/";
locations."/lounge/".proxyPass = "http://localhost:${port}/lounge/";
diff --git a/krebs/2configs/shack/prometheus/alert-rules.nix b/krebs/2configs/shack/prometheus/alert-rules.nix
index 096c551ba..1c2d0b1ad 100644
--- a/krebs/2configs/shack/prometheus/alert-rules.nix
+++ b/krebs/2configs/shack/prometheus/alert-rules.nix
@@ -1,102 +1,42 @@
-{ lib }:
-with lib;
-
+{ lib, ... }:
let
- deviceFilter = ''device!="ramfs",device!="rpc_pipefs",device!="lxcfs",device!="nsfs",device!="borgfs"'';
-in mapAttrsToList (name: opts: {
- alert = name;
- expr = opts.condition;
- for = opts.time or "2m";
- labels = if (opts.page or true) then { severity = "page"; } else {};
- annotations = {
- summary = opts.summary;
- description = opts.description;
- };
-}) {
- node_down = {
- condition = ''up{job="node"} == 0'';
- summary = "{{$labels.alias}}: Node is down.";
- description = "{{$labels.alias}} has been down for more than 2 minutes.";
- };
- node_systemd_service_failed = {
- condition = ''node_systemd_unit_state{state="failed"} == 1'';
- summary = "{{$labels.alias}}: Service {{$labels.name}} failed to start.";
- description = "{{$labels.alias}} failed to (re)start service {{$labels.name}}.";
- };
- node_filesystem_full_80percent = {
- condition = ''sort(node_filesystem_free_bytes{${deviceFilter}} < node_filesystem_size_bytes{${deviceFilter}} * 0.2) / 1024^3'';
- time = "10m";
- summary = "{{$labels.alias}}: Filesystem is running out of space soon.";
- description = "{{$labels.alias}} device {{$labels.device}} on {{$labels.mountpoint}} got less than 20% space left on its filesystem.";
- };
- node_filesystem_full_in_7d = {
- condition = ''predict_linear(node_filesystem_free_bytes{${deviceFilter}}[2d], 7*24*3600) <= 0'';
- time = "1h";
- summary = "{{$labels.alias}}: Filesystem is running out of space in 7 days.";
- description = "{{$labels.alias}} device {{$labels.device}} on {{$labels.mountpoint}} is running out of space of in approx. 7 days";
- };
- node_filesystem_full_in_30d = {
- condition = ''predict_linear(node_filesystem_free_bytes{${deviceFilter}}[30d], 30*24*3600) <= 0'';
- time = "1h";
- summary = "{{$labels.alias}}: Filesystem is running out of space in 30 days.";
- description = "{{$labels.alias}} device {{$labels.device}} on {{$labels.mountpoint}} is running out of space of in approx. 30 days";
- };
- node_filedescriptors_full_in_3h = {
- condition = ''predict_linear(node_filefd_allocated[3h], 3*3600) >= node_filefd_maximum'';
- time = "20m";
- summary = "{{$labels.alias}} is running out of available file descriptors in 3 hours.";
- description = "{{$labels.alias}} is running out of available file descriptors in approx. 3 hours";
- };
- node_filedescriptors_full_in_7d = {
- condition = ''predict_linear(node_filefd_allocated[7d], 7*24*3600) >= node_filefd_maximum'';
- time = "1h";
- summary = "{{$labels.alias}} is running out of available file descriptors in 7 days.";
- description = "{{$labels.alias}} is running out of available file descriptors in approx. 7 days";
- };
- node_load15 = {
- condition = ''node_load15 / on(alias) count(node_cpu_seconds_total{mode="system"}) by (alias) >= 1.0'';
- time = "10m";
- summary = "{{$labels.alias}}: Running on high load: {{$value}}";
- description = "{{$labels.alias}} is running with load15 > 1 for at least 5 minutes: {{$value}}";
- };
- node_ram_using_90percent = {
- condition = "node_memory_MemFree_bytes + node_memory_Buffers_bytes + node_memory_Cached_bytes < node_memory_MemTotal_bytes * 0.1";
- time = "1h";
- summary = "{{$labels.alias}}: Using lots of RAM.";
- description = "{{$labels.alias}} is using at least 90% of its RAM for at least 1 hour.";
- };
- node_swap_using_30percent = {
- condition = "node_memory_SwapTotal_bytes - (node_memory_SwapFree_bytes + node_memory_SwapCached_bytes) > node_memory_SwapTotal_bytes * 0.3";
- time = "30m";
- summary = "{{$labels.alias}}: Using more than 30% of its swap.";
- description = "{{$labels.alias}} is using 30% of its swap space for at least 30 minutes.";
- };
- node_visible_confluence_space = {
- condition = "node_visible_confluence_space != 0";
- summary = "crowd prometheus cann see the {{$labels.space_name}} confluence space!";
- description = "crowd user `prometheus` can see the `{{$labels.space_name}}` confluence space.";
- };
- node_hwmon_temp = {
- condition = "node_hwmon_temp_celsius > node_hwmon_temp_crit_celsius*0.9 OR node_hwmon_temp_celsius > node_hwmon_temp_max_celsius*0.95";
- time = "5m";
- summary = "{{$labels.alias}}: Sensor {{$labels.sensor}}/{{$labels.chip}} temp is high: {{$value}} ";
- description = "{{$labels.alias}} reports hwmon sensor {{$labels.sensor}}/{{$labels.chip}} temperature value is nearly critical: {{$value}}";
- };
- node_conntrack_limit = {
- condition = "node_nf_conntrack_entries_limit - node_nf_conntrack_entries < 1000";
- time = "5m";
- summary = "{{$labels.alias}}: Number of tracked connections high";
- description = "{{$labels.alias}} has only {{$value}} free slots for connection tracking available.";
- };
- node_reboot = {
- condition = "time() - node_boot_time_seconds < 300";
- summary = "{{$labels.alias}}: Reboot";
- description = "{{$labels.alias}} just rebooted.";
- };
- node_uptime = {
- condition = "time() - node_boot_time_seconds > 2592000";
- page = false;
- summary = "{{$labels.alias}}: Uptime monster";
- description = "{{$labels.alias}} has been up for more than 30 days.";
- };
+ disk_free_threshold = "10"; # at least this much free disk percentage
+in {
+ services.prometheus.rules = [(builtins.toJSON
+ {
+ groups = [
+ { name = "shack-env";
+ rules = [
+ {
+ alert = "RootPartitionFull";
+ for = "30m";
+ expr = ''(node_filesystem_avail_bytes{alias="wolf.shack",mountpoint="/"} * 100) / node_filesystem_size_bytes{alias="wolf.shack",mountpoint="/"} < ${disk_free_threshold}'';
+ labels.severity = "warning";
+ annotations.summary = "{{ $labels.alias }} root disk full";
+ annotations.url = "http://grafana.shack/d/hb7fSE0Zz/shack-system-dashboard?orgId=1&var-job=node&var-hostname=All&var-node=wolf.shack:9100&var-device=All&var-maxmount=%2F&var-show_hostname=wolf";
+          annotations.description = ''The root disk of {{ $labels.alias }} has {{ $value | printf "%.2f" }}% free disk space (Threshold at ${disk_free_threshold}%). A vast number of shackspace services will stop working. CI for deploying new configuration will also cease working. Log in to the system and run `nix-collect-garbage -d` and clean up the shack share folder in `/home/share`. If this does not help, you can check `du -hs /var/ | sort -h`, run `docker system prune`, or if you are really desperate, run `du -hs / | sort -h` and go through the folders recursively until you've found something to delete'';
+ }
+ {
+ alert = "RootPartitionFull";
+ for = "30m";
+ expr = ''(node_filesystem_avail_bytes{alias="puyak.shack",mountpoint="/"} * 100) / node_filesystem_size_bytes{alias="puyak.shack",mountpoint="/"} < ${disk_free_threshold}'';
+ labels.severity = "warning";
+ annotations.summary = "{{ $labels.alias }} root disk full";
+ annotations.url = "http://grafana.shack/d/hb7fSE0Zz/shack-system-dashboard?orgId=1&var-job=node&var-hostname=All&var-node=wolf.shack:9100&var-device=All&var-maxmount=%2F&var-show_hostname=puyak";
+          annotations.description = ''The root disk of {{ $labels.alias }} has {{ $value | printf "%.2f" }}% free disk space (Threshold at ${disk_free_threshold}%). Prometheus will not be able to create new alerts, and CI for deploying new configuration will also cease working. Log in to the system and run `nix-collect-garbage -d`; if this does not help, you can check `du -hs /var/ | sort -h`, run `docker system prune`, or if you are really desperate, run `du -hs / | sort -h` and go through the folders recursively until you've found something to delete'';
+ }
+ {
+ alert = "HostDown";
+ expr = ''up{alias="wolf.shack"} == 0'';
+ for = "5m";
+ labels.severity = "page";
+ annotations.summary = "Instance {{ $labels.alias }} down for 5 minutes";
+ annotations.url = "http://grafana.shack/d/hb7fSE0Zz/shack-system-dashboard?orgId=1&var-job=node&var-hostname=All&var-node=wolf.shack:9100&var-device=All&var-maxmount=%2F&var-show_hostname=wolf";
+          annotations.description = ''Host {{ $labels.alias }} went down and has not been reconnected after 5 minutes. This is probably bad news; try to restart the host via naproxen (http://naproxen.shack:8006). Wolf being down means that CI, glados automation, light management and a couple of other services will not work anymore.'';
+ }
+ ];
+ }
+ ];
+ }
+ )];
}
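
The two RootPartitionFull rules differ only in the host alias and the Grafana link; a minimal sketch of how they could be generated from a host list instead of being duplicated (not part of this commit, per-host description text omitted):

    # Sketch only: derive one RootPartitionFull rule per monitored host.
    rules = (map (host: {
      alert = "RootPartitionFull";
      for = "30m";
      expr = ''(node_filesystem_avail_bytes{alias="${host}",mountpoint="/"} * 100) / node_filesystem_size_bytes{alias="${host}",mountpoint="/"} < ${disk_free_threshold}'';
      labels.severity = "warning";
      annotations.summary = "{{ $labels.alias }} root disk full";
    }) [ "wolf.shack" "puyak.shack" ]) ++ [ /* HostDown rule as above */ ];
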
diff --git a/krebs/2configs/shack/prometheus/alertmanager-telegram.nix b/krebs/2configs/shack/prometheus/alertmanager-telegram.nix
new file mode 100644
index 000000000..8527001cb
--- /dev/null
+++ b/krebs/2configs/shack/prometheus/alertmanager-telegram.nix
@@ -0,0 +1,17 @@
+{ pkgs, ...}:
+{
+ systemd.services.alertmanager-bot-telegram = {
+ wantedBy = [ "multi-user.target" ];
+ after = [ "ip-up.target" ];
+ serviceConfig = {
+ EnvironmentFile = toString <secrets/shack/telegram_bot.env>;
+ DynamicUser = true;
+ StateDirectory = "alertbot";
+ ExecStart = ''${pkgs.alertmanager-bot-telegram}/bin/alertmanager-bot \
+ --alertmanager.url=http://alert.prometheus.shack --log.level=info \
+ --store=bolt --bolt.path=/var/lib/alertbot/bot.db \
+ --listen.addr="0.0.0.0:16320" \
+ --template.paths=${./templates}/shack.tmpl'';
+ };
+ };
+}
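
The EnvironmentFile referenced above lives under <secrets> and is not in the repository. For the upstream alertmanager-bot it would typically carry the Telegram credentials; the variable names below are assumptions taken from the upstream project's documentation and should be checked against `alertmanager-bot --help`:

    # Sketch only: assumed contents of <secrets/shack/telegram_bot.env>.
    # TELEGRAM_ADMIN=<numeric Telegram user id allowed to control the bot>
    # TELEGRAM_TOKEN=<bot token issued by @BotFather>
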
diff --git a/krebs/2configs/shack/prometheus/server.nix b/krebs/2configs/shack/prometheus/server.nix
index c088a3b08..9e4b4d1a7 100644
--- a/krebs/2configs/shack/prometheus/server.nix
+++ b/krebs/2configs/shack/prometheus/server.nix
@@ -1,6 +1,9 @@
{ pkgs, lib, config, ... }:
# from https://gist.github.com/globin/02496fd10a96a36f092a8e7ea0e6c7dd
{
+ imports = [
+ ./alert-rules.nix
+ ];
networking = {
firewall.allowedTCPPorts = [
9090 # prometheus
@@ -18,12 +21,6 @@
};
prometheus = {
enable = true;
- ruleFiles = lib.singleton (pkgs.writeText "prometheus-rules.yml" (builtins.toJSON {
- groups = lib.singleton {
- name = "mf-alerting-rules";
- rules = import ./alert-rules.nix { inherit lib; };
- };
- }));
scrapeConfigs = [
{
job_name = "node";
@@ -118,7 +115,10 @@
];
alertmanager = {
enable = true;
- listenAddress = "0.0.0.0";
+ listenAddress = "127.0.0.1";
+ webExternalUrl = "http://alert.prometheus.shack";
+ logLevel = "debug";
+
configuration = {
"global" = {
"smtp_smarthost" = "smtp.example.com:587";
@@ -134,15 +134,10 @@
"receivers" = [
{
"name" = "team-admins";
- "email_configs" = [
- {
- "to" = "devnull@example.com";
- "send_resolved" = true;
- }
- ];
+ "email_configs" = [ ];
"webhook_configs" = [
{
- "url" = "https://example.com/prometheus-alerts";
+ "url" = "http://localhost:16320";
"send_resolved" = true;
}
];
diff --git a/krebs/2configs/shack/prometheus/templates/shack.tmpl b/krebs/2configs/shack/prometheus/templates/shack.tmpl
new file mode 100644
index 000000000..9295f019f
--- /dev/null
+++ b/krebs/2configs/shack/prometheus/templates/shack.tmpl
@@ -0,0 +1,25 @@
+{{ define "telegram.default" }}
+{{range .Alerts -}}
+{{ $severity := index .Labels "severity" }}
+{{ $desc := "No Description" }}
+{{ if eq .Status "firing" }}
+ {{ $desc = index .Annotations "description" }}
+ {{- if eq $severity "critical" -}}
+ <i><u><b>[CRITICAL]</b></u></i>
+ {{- else if eq $severity "warning" -}}
+ <u><b>[WARNING]</b></u>
+ {{- else -}}
+ <b>[{{ $severity }}]</b>
+ {{- end -}}
+{{ else -}}
+ {{ $desc = "The issue has been resolved" }}
+ <del>[RESOLVED]</del>
+{{- end }} {{ index .Labels "alertname"}}: {{ index .Annotations "summary"}}
+
+{{ $desc }}
+
+Alert Links:
+* <a href="{{ index .Annotations "url"}}">Grafana</a>
+* <a href="{{ .GeneratorURL }}">Source</a>
+{{end -}}
+{{end}}
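
Rendered through this template, a firing warning such as the RootPartitionFull alert above would arrive in Telegram roughly as follows (illustrative only; the free-space value and link targets are placeholders, and exact whitespace depends on the template engine):

    <u><b>[WARNING]</b></u> RootPartitionFull: wolf.shack root disk full

    The root disk of wolf.shack has <value>% free disk space (Threshold at 10%). ...

    Alert Links:
    * <a href="http://grafana.shack/...">Grafana</a>
    * <a href="http://prometheus.shack/...">Source</a>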