victoriametrics: changeset 19:10017def57ce
update scrape config
author    drewp@bigasterisk.com
date      Sat, 24 Jun 2023 01:43:55 -0700
parents   92dce2fa8954
children  f5777b65f035
files     config/scrape_main.yaml
diffstat  1 file changed, 24 insertions(+), 42 deletions(-)
--- a/config/scrape_main.yaml	Sat Jun 24 01:43:21 2023 -0700
+++ b/config/scrape_main.yaml	Sat Jun 24 01:43:55 2023 -0700
@@ -11,12 +11,6 @@
 scrape_configs:
   # some based on https://github.com/prometheus/prometheus/blob/main/documentation/examples/prometheus-kubernetes.yml

-  # - job_name: "vmalert"
-  #   metrics_path: /vmalert/metrics
-  #   static_configs:
-  #     - targets:
-  #         - vmalert.default.svc.cluster.local
-
   - job_name: "kubernetes-apiservers"
     scheme: https
     tls_config: { ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt }
@@ -63,9 +57,9 @@
         regex: "false"
         action: drop

-      - source_labels: [__meta_kubernetes_namespace]
-        regex: default
-        action: keep
+      # - source_labels: [__meta_kubernetes_namespace]
+      #   regex: default
+      #   action: keep

       # promote these to display
       - source_labels: [__meta_kubernetes_service_name]
@@ -92,32 +86,31 @@
         replacement: "/m/metrics"

       # discovery is matching extra ports that don't serve metrics- remove these targets
+      - {if: '{job="cert-manager-webhook"}', action: drop}
+      - {if: '{job="cert-manager", __port_number="9403"}', action: drop}
+      - {if: '{job="filesync-syncthing",__port_number="8384"}', action: drop}
+      - {if: '{job="jsregistry", __port_number="4873"}', action: drop}
+      - {if: '{job="kube-dns", __port_number="53"}', action: drop}
       - {if: '{job="kubernetes"}', action: drop}
       - {if: '{job="mongodb", __port_number="27017"}', action: drop}
       - {if: '{job="mosquitto-ext", __port_number="1883"}', action: drop}
-      - {if: '{job="filesync-syncthing",__port_number="8384"}', action: drop}
-      - {if: '{job="jsregistry", __port_number="4873"}', action: drop}
+      - {if: '{job="net-route-input", __port_number="80"}', action: drop}
       - {if: '{job="photoprism", __port_number="2342"}', action: drop}
-      - {if: '{job="net-route-input", __port_number="80"}', action: drop}
+      - {if: '{job="pomerium-metrics", __port_number="8080"}', action: drop}
+      - {if: '{job="pomerium-metrics", __port_number="8443"}', action: drop}
+      - {if: '{job="pomerium-proxy", __port_number="8080"}', action: drop}
+      - {if: '{job="pomerium-proxy", __port_number="8443"}', action: drop}
+
+      # Needs https. Used by `kubectl top`
+      - {if: '{job="metrics-server", __port_number="4443"}', action: drop}

       # discovery is also matching redundant container ports that it also catches with the service
-      - {if: '{job="lanscape", __port_number="8001"}', action: drop}
-      - {if: '{job="lanscape", __port_number="8002"}', action: drop}
-      - {if: '{job="collector", __port_number="8001"}', action: drop}
-      - {if: '{job="collector", __port_number="8002"}', action: drop}
-      - {if: '{job="racc-console", __port_number="8002"}', action: drop}
-      - {if: '{job="antigen-web", __port_number="8001"}', action: drop}
-
-
-      # # seems like this would match more stuff, but all I get is coredns
-      # - job_name: 'old_coredns'
-      #   kubernetes_sd_configs: [{role: pod}]
-      #   relabel_configs:
-      #     - source_labels: [__meta_kubernetes_pod_container_port_name]
-      #       regex: metrics
-      #       action: keep
-      #     - source_labels: [__meta_kubernetes_pod_container_name]
-      #       target_label: job
+      - {if: '{job="antigen-web", __port_number="8001"}', action: drop}
+      - {if: '{job="collector", __port_number="8001"}', action: drop}
+      - {if: '{job="collector", __port_number="8002"}', action: drop}
+      - {if: '{job="lanscape", __port_number="8001"}', action: drop}
+      - {if: '{job="lanscape", __port_number="8002"}', action: drop}
+      - {if: '{job="racc-console", __port_number="8002"}', action: drop}

   - job_name: "telegraf"
     scheme: http
@@ -155,18 +148,11 @@
         - targets:
             - pipe:9991

-  - job_name: "pomerium"
-    static_configs:
-      - targets:
-          - pomerium-metrics.pomerium.svc:9090
-          - cert-manager.cert-manager.svc:9402
-
-
   - job_name: "zfs"
     scrape_interval: 1h
     static_configs:
       - targets:
-          # running in in k8s, but not in SD
+          # running in k8s, but as a daemonset, so it's not in SD above
           - ditto:9634
           - ditto:9986
@@ -179,16 +165,12 @@
       - targets:
           # printer, since it falls out of ntop with no traffic at all. Or, we could poll ink status at http://10.2.0.37/general/status.html?pageid=1
           - printer014032ED
-          # frontbed, for monitoring
-#          - 10.5.0.17 # too flaky
-          # asher bulb, not sure why it sleeps so long
-          - bulb1
     relabel_configs:
       - {source_labels: [__address__], target_label: __param_target}
       - {source_labels: [__param_target], target_label: instance}
       - target_label: __address__
         replacement: prober

+  - job_name: "racc"
+    scrape_interval: 30s
+    static_configs:
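
For context: the first hunk only shows the first three lines of the kubernetes-apiservers job. A sketch of the typical in-cluster shape of this job, assuming it follows the upstream prometheus-kubernetes.yml example linked at the top of the config (the authorization, kubernetes_sd_configs, and relabel_configs lines below come from that example, not from this changeset):

    - job_name: "kubernetes-apiservers"
      scheme: https
      # the cluster CA and the service account token are both mounted
      # into every pod by kubernetes at these standard paths
      tls_config: { ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt }
      authorization:
        credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      kubernetes_sd_configs: [{role: endpoints}]
      relabel_configs:
        # keep only the apiserver's own https endpoint
        - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
          regex: default;kubernetes;https
          action: keep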
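The `{if: '...', action: drop}` entries in the third hunk use the VictoriaMetrics/vmagent relabeling extension, where `if` takes a series selector; stock Prometheus would express the same drop with source_labels and a regex. A minimal sketch of one rule both ways, assuming `__port_number` is copied out of the discovered address by an earlier relabel step that this hunk doesn't show (that derivation is hypothetical):

    relabel_configs:
      # hypothetical derivation of __port_number from the discovered address;
      # action defaults to replace and replacement defaults to $1
      - source_labels: [__address__]
        regex: '.*:(\d+)'
        target_label: __port_number
      # vmagent form: drop any target whose labels match the series selector
      - if: '{job="mongodb", __port_number="27017"}'
        action: drop
      # stock-Prometheus equivalent: labels joined with ";", full-match regex
      - source_labels: [job, __port_number]
        regex: mongodb;27017
        action: drop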
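The relabel block kept in the last hunk is the standard blackbox-exporter probing idiom: a `__param_<name>` label becomes a `?name=` query parameter on the scrape URL, so the discovered address is handed to the prober as `?target=...` while the scrape itself goes to the prober service. Annotated (assuming `prober` resolves to a blackbox-style exporter in the cluster):

    relabel_configs:
      # pass the discovered address to the prober as ?target=<host>
      - {source_labels: [__address__], target_label: __param_target}
      # keep the probed host, not the prober, as the instance label
      - {source_labels: [__param_target], target_label: instance}
      # actually scrape the prober service instead of the target
      - target_label: __address__
        replacement: prober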