alert_rules.py @ 31:d39a8038227b ("reformat")
author:   drewp@bigasterisk.com
date:     Wed, 19 Jul 2023 21:27:46 -0700
parents:  e114edff93dc
children: eb1de82c93aa
23 | 1 """ |
2 pdm run invoke push-config | |
3 | |
4 docs: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ | |
5 "Whenever the alert expression results in one or more vector | |
6 elements at a given point in time, the alert counts as active for | |
7 these elements' label sets." | |
8 also https://www.metricfire.com/blog/top-5-prometheus-alertmanager-gotchas/#Missing-metrics | |
9 | |
10 """ | |
11 | |
12 import json | |
13 | |
14 | |
15 def k8sRules(): | |
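    # Each entry is one alerting rule in the Prometheus/vmalert rule schema:
    # an "alert" name, an "expr" query, an optional "for" duration, and
    # optional "labels"/"annotations".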
    # from https://awesome-prometheus-alerts.grep.to/rules.html
    return [
        {
            "alert": "PrometheusTargetMissing",
            "expr": "up == 0",
            "labels": {
                "severity": "critical"
            },
            "annotations": {
                "summary": "Prometheus target missing (instance {{ $labels.instance }})",
26 "description": "A Prometheus target has disappeared. An exporter might be crashed.\n VALUE = {{ $value }}", | |
            },
        },
        {
            "alert": "KubernetesMemoryPressure",
            "expr": 'kube_node_status_condition{condition="MemoryPressure",status="true"} == 1',
            "for": "2m",
            "labels": {
                "severity": "critical"
            },
            "annotations": {
                "summary": "Kubernetes memory pressure (instance {{ $labels.instance }})",
                "description": "{{ $labels.node }} has MemoryPressure condition\n VALUE = {{ $value }}",
            },
        },
        {
            "alert": "KubernetesDiskPressure",
            "expr": 'kube_node_status_condition{condition="DiskPressure",status="true"} == 1',
            "for": "2m",
            "labels": {
                "severity": "critical"
            },
            "annotations": {
                "summary": "Kubernetes disk pressure (instance {{ $labels.instance }})",
                "description": "{{ $labels.node }} has DiskPressure condition\n VALUE = {{ $value }}",
            },
        },
        {
            "alert": "KubernetesOutOfDisk",
            "expr": 'kube_node_status_condition{condition="OutOfDisk",status="true"} == 1',
            "for": "2m",
            "labels": {
                "severity": "critical"
            },
            "annotations": {
                "summary": "Kubernetes out of disk (instance {{ $labels.instance }})",
                "description": "{{ $labels.node }} has OutOfDisk condition\n VALUE = {{ $value }}",
            },
        },
        {
            "alert": "KubernetesJobFailed",
            "expr": "kube_job_status_failed > 0",
            "labels": {
                "severity": "warning"
            },
            "annotations": {
                "summary": "Kubernetes Job failed (instance {{ $labels.instance }})",
                "description": "Job {{$labels.namespace}}/{{$labels.exported_job}} failed to complete\n VALUE = {{ $value }}",
            },
        },
        {
            "alert": "KubernetesPodCrashLooping",
            "expr": "increase(kube_pod_container_status_restarts_total[1m]) > 3",
            "for": "2m",
            "labels": {
                "severity": "warning"
            },
            "annotations": {
                "summary": "Kubernetes pod crash looping (instance {{ $labels.instance }})",
                "description": "Pod {{ $labels.pod }} is crash looping\n VALUE = {{ $value }}",
            },
        },
        {
            "alert": "KubernetesClientCertificateExpiresNextWeek",
            "expr": 'apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 7*24*60*60',
            "labels": {
                "severity": "warning"
            },
            "annotations": {
                "summary": "Kubernetes client certificate expires next week (instance {{ $labels.instance }})",
                "description": "A client certificate used to authenticate to the apiserver is expiring next week.\n VALUE = {{ $value }}",
            },
        },
        {
            "alert": "container_waiting",
            "expr": "sum by (container)(kube_pod_container_status_waiting!=0)",
            "for": "2m",
        },
    ]


def allRules():
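    # Assembles the vmalert/Prometheus rule-file structure: named groups,
    # each with an evaluation interval and a list of rules.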
    return {
        "groups": [
            {
                "name": "k8s",
                "interval": "1m",
                "rules": k8sRules(),
            },
            #
            # any presence of starlette_request_duration_seconds_created{app_name="starlette",method="GET",path="/",status_code="200"} 1.6460176156784086e+09 means someone forgot to set app name
            {
                "name": "Outages",
                "interval": "1m",
                "rules": [
                    {
                        "alert": "powereagleStalled",
                        "expr": "rate(house_power_w[100m]) == 0",
                        "for": "0m",
                        "labels": {
                            "severity": "losingData"
                        },
                        "annotations": {
                            "summary": "power eagle data stalled",
                            "description": "logs at https://bigasterisk.com/k/clusters/local/namespaces/default/deployments/power-eagle/logs",
                        },
                    },
                    {
                        "alert": "powereagleAbsent",
                        "expr": "absent_over_time(house_power_w[5m])",
                        "for": "2m",
                        "labels": {
                            "severity": "losingData"
                        },
                        "annotations": {
                            "summary": "power eagle data missing",
                            "description": "logs at https://bigasterisk.com/k/clusters/local/namespaces/default/deployments/power-eagle/logs",
                        },
                    },
                    {
                        "alert": "absent_zigbee",
                        "expr": 'absent(container_last_seen{container="zigbee2mqtt"})',
                    },
                    {
                        "alert": "net_routes_sync",
                        "expr": 'rate(starlette_request_duration_seconds_count{app_name="net_routes",path="/routes"}[5m]) < 1/70',
                        "for": "10m",
                        "labels": {
                            "severity": "houseUsersAffected"
                        },
                        "annotations": {
                            "summary": "net_routes is not getting regular updates"
                        },
                    },
                ],
            },
            {
                "name": "disk_errs",
                "interval": "2d",
                "rules": [{
                    "alert": "zpool_device_error_count",
                    "labels": {
                        "severity": "warning"
                    },
                    "expr": 'increase(zpool_device_error_count[3d]) > 0',
                }],
            },
            {
                "name": "alerts",
                "rules": [
                    {
                        "alert": "kube_node_status_bad_condition",
                        "for": "2h",
                        "labels": {
                            "severity": "warning"
                        },
                        "expr": 'kube_node_status_condition{condition=~".*Pressure",status="true"} > 0',
                    },
                    {
                        "alert": "housePower",
                        "for": "1h",
                        "labels": {
                            "severity": "waste"
                        },
                        "expr": "house_power_w > 4000",
                        "annotations": {
                            "summary": "house power usage over 4KW"
                        },
                    },
                    {
                        "alert": "host_root_fs_space_low",
                        "for": "20m",
                        "labels": {
                            "severity": "warning"
                        },
                        "expr": 'disk_free{path="/"} < 20G',
                    },
                    {
                        "alert": "zpool_space_low",
                        "for": "20m",
                        "labels": {
                            "severity": "warning"
                        },
                        "expr": 'last_over_time(zfs_pool_free_bytes{pool="stor7"}[1h]) < 100G',
                    },
                    {
                        "alert": "disk_week_incr",
                        "for": "20m",
                        "labels": {
                            "severity": "warning"
                        },
                        "expr": 'round(increase(disk_used{path=~"/my/.*"}[1d])/1M) > 5000',
                        "annotations": {
                            "summary": "high mb/week on zfs dir"
                        },
                    },
                    {
                        "alert": "high_logging",
                        "for": "3h",
                        "labels": {
                            "severity": "waste"
                        },
                        "expr": "sum by (container) (rate(kubelet_container_log_filesystem_used_bytes[3h])) > 4k",
                        "annotations": {
                            "summary": "high log output rate"
                        },
                    },
                    {
                        "alert": "stale_process",
                        "for": "1d",
                        "labels": {
                            "severity": "dataRisk"
                        },
                        "expr": "round((time() - filestat_modification_time/1e9) / 86400) > 14",
                        "annotations": {
                            "summary": "process time is old"
                        },
                    },
                    {
                        "alert": "starlette",
                        "for": "1m",
                        "labels": {
                            "severity": "fix"
                        },
                        "expr": 'starlette_request_duration_seconds_created{app_name="starlette"}',
                        "annotations": {
                            "summary": "set starlette app name"
                        },
                    },
                    {
                        "alert": "ssl_certs_expiring_soon",
                        "expr": "min((min_over_time(probe_ssl_earliest_cert_expiry[1d])-time())/86400) < 10",
                        "labels": {
                            "severity": "warning"
                        },
                        "annotations": {
                            "summary": "cert expiring soon. See https://bigasterisk.com/grafana/d/z1YtDa3Gz/certs?orgId=1\nVALUE = {{ $value }}"
                        },
                    },
                ],
            },
        ]
    }


def _runJson(ctx, cmd):
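    # Runs a shell command through the invoke context and parses its stdout as JSON.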
    return json.loads(ctx.run(cmd, hide="stdout").stdout)


def hostsExpectedOnline(ctx):
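    # Returns the parsed JSON output of the lanscape hosts_expected_online.py script.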
    return _runJson(ctx, "cd /my/serv/lanscape; pdm run python hosts_expected_online.py")


def expectedK8sNodes(ctx):
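    # Builds one alert group from the nodes currently registered in kubernetes:
    # for each non-optional node, alert if no
    # kubelet_container_log_filesystem_used_bytes sample has been seen for 1h.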
    getNode = _runJson(ctx, "kubectl get node -o json")
    hosts = [item["metadata"]["name"] for item in getNode["items"]]
    optionalHosts = {'slash'}
    return {
        "groups": [
            {
                "name": "k8s_expected_nodes",
                "rules": [
                    {
                        "alert": "kube_node_log_size_report_" + h,
                        "expr": 'absent(kubelet_container_log_filesystem_used_bytes{instance="%s"})' % h,
                        "for": "1h",
                        "annotations": {
                            "summary": f"no recent k8s log size report from host {h}"
                        },
                    }
                    for h in hosts if h not in optionalHosts
                ],
            }
        ]
    }
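
# Illustrative sketch, not part of this file: the docstring says these rules
# are pushed with `pdm run invoke push-config`. That task is defined
# elsewhere; one plausible shape for it, assuming PyYAML is available and
# guessing the task name and output path, would be:
#
#   import yaml
#   from invoke import task
#
#   @task
#   def push_config(ctx):
#       with open("build/alert_rules.yaml", "w") as f:
#           yaml.safe_dump(allRules(), f)
#       # ...then apply the file however vmalert picks up its rule config.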