diff config/scrape_config.yaml @ 4:1eb6e6a2b9b6

version control configs finally; use configmaps to present them to VM
author drewp@bigasterisk.com
date Sun, 12 Jun 2022 17:08:31 -0700
parents
children
line wrap: on
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/config/scrape_config.yaml	Sun Jun 12 17:08:31 2022 -0700
@@ -0,0 +1,151 @@
+global:
+  scrape_interval: 1m
+  scrape_timeout: 10s
+
+scrape_config_files:
+  - scrape_ssl.yaml
+# These can even be urls: https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmagent/README.md#loading-scrape-configs-from-multiple-files
+
+scrape_configs:
+  # some based on https://github.com/prometheus/prometheus/blob/main/documentation/examples/prometheus-kubernetes.yml
+
+  - job_name: "victoriametrics"
+    metrics_path: /m/metrics
+    static_configs:
+      - targets:
+          - victoriametrics.default.svc.cluster.local
+
+  - job_name: "vmalert"
+    metrics_path: /vmalert/metrics
+    static_configs:
+      - targets:
+          - vmalert.default.svc.cluster.local
+
+  - job_name: "kubernetes-apiservers"
+    scheme: https
+    tls_config: { ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt }
+    bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+    kubernetes_sd_configs: [{ role: endpoints }]
+
+    relabel_configs:
+      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+        action: keep
+        regex: default;kubernetes;https
+
+  - job_name: "kubernetes-nodes"
+    scheme: https
+    tls_config: { ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt }
+    bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+    kubernetes_sd_configs: [{ role: node }]
+
+    relabel_configs:
+      - action: labelmap
+        regex: __meta_kubernetes_node_label_(.+)
+
+  # see https://github.com/google/cadvisor/blob/master/docs/storage/prometheus.md
+  # for metric definitions
+  - job_name: "kubernetes-cadvisor"
+    scheme: https
+    metrics_path: /metrics/cadvisor
+    tls_config: { ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt }
+    bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+    kubernetes_sd_configs: [{ role: node }]
+
+    relabel_configs:
+      - action: labelmap
+        regex: __meta_kubernetes_node_label_(.+)
+
+  - job_name: "kube-state-metrics"
+    static_configs:
+      - targets:
+          - kube-state-metrics.kube-system.svc.cluster.local:8080
+          - kube-state-metrics.kube-system.svc.cluster.local:8081
+
+  - job_name: "k8services"
+    kubernetes_sd_configs: [{ role: endpoints }]
+    relabel_configs:
+      # To omit a service, add this at pod-level (Deployment.spec.template.metadata.annotations):
+      #   annotations: { prometheus.io/scrape: "false" }
+      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
+        regex: "false"
+        action: drop
+
+      - source_labels: [__meta_kubernetes_service_name]
+        regex: kubernetes
+        action: drop
+
+      - source_labels: [__meta_kubernetes_namespace]
+        regex: default
+        action: keep
+
+      - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_pod_container_port_number]
+        regex: "mitmproxy;1008[01]"
+        action: drop
+
+      - source_labels: [__meta_kubernetes_service_name]
+        target_label: job
+
+      - source_labels: [__meta_kubernetes_pod_node_name]
+        target_label: node
+
+      - source_labels: [__meta_kubernetes_pod_label_app, __meta_kubernetes_pod_container_port_number]
+        action: drop
+        regex: jsregistry;4873
+
+      - source_labels: [__meta_kubernetes_pod_label_app, __meta_kubernetes_pod_container_port_number]
+        action: drop
+        regex: mosquitto-ext;1883
+
+  # # seems like this would match more stuff, but all I get is coredns
+  # - job_name: 'old_coredns'
+  #   kubernetes_sd_configs: [{role: pod}]
+  #   relabel_configs:
+  #     - source_labels: [__meta_kubernetes_pod_container_port_name]
+  #       regex: metrics
+  #       action: keep
+  #     - source_labels: [__meta_kubernetes_pod_container_name]
+  #       target_label: job
+
+  - job_name: "telegraf"
+    scheme: http
+    kubernetes_sd_configs: [{ role: node }]
+    relabel_configs:
+      - source_labels: [__address__]
+        regex: "(.*):(\\d+)"
+        target_label: __address__
+        replacement: "${1}:9273"
+        action: replace
+
+  - job_name: "ntop"
+    metrics_path: /lua/local/lanscape/main.lua
+    static_configs:
+      - targets:
+          - 10.5.0.1:3000
+
+  - job_name: "net-routes"
+    static_configs:
+      - targets:
+          - 10.2.0.3:10001
+
+  - job_name: "ping"
+    scrape_interval: 2m
+    metrics_path: /probe
+    params:
+      module: [icmp]
+    static_configs:
+      - targets:
+          # printer, since it falls out of ntop with no traffic at all. Or, we could poll ink status at http://10.2.0.37/general/status.html?pageid=1
+          - 10.2.0.37
+          # frontbed, for monitoring
+          - 10.5.0.17
+
+    relabel_configs:
+      - source_labels: [__address__]
+        target_label: __param_target
+      - source_labels: [__param_target]
+        target_label: instance
+      - target_label: __address__
+        replacement: prober