Targets


monitoring/ceph-metrics-sm/0 (0/3 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://192.168.5.101:9283/metrics
down endpoint="metrics" instance="192.168.5.101:9283" job="ceph-metrics" namespace="monitoring" service="ceph-metrics" 6.694s ago 849.4us Get http://192.168.5.101:9283/metrics: dial tcp 192.168.5.101:9283: connect: connection refused
http://192.168.5.102:9283/metrics
down endpoint="metrics" instance="192.168.5.102:9283" job="ceph-metrics" namespace="monitoring" service="ceph-metrics" 7.043s ago 1.066ms Get http://192.168.5.102:9283/metrics: dial tcp 192.168.5.102:9283: connect: connection refused
http://192.168.5.103:9283/metrics
down endpoint="metrics" instance="192.168.5.103:9283" job="ceph-metrics" namespace="monitoring" service="ceph-metrics" 1.721s ago 568.4us Get http://192.168.5.103:9283/metrics: dial tcp 192.168.5.103:9283: connect: connection refused

monitoring/prometheus-prometheus-oper-alertmanager/0 (1/1 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://10.244.0.29:9093/metrics
up endpoint="web" instance="10.244.0.29:9093" job="prometheus-prometheus-oper-alertmanager" namespace="monitoring" pod="alertmanager-prometheus-prometheus-oper-alertmanager-0" service="prometheus-prometheus-oper-alertmanager" 26.874s ago 10.12ms

monitoring/prometheus-prometheus-oper-apiserver/0 (3/3 up)

Endpoint State Labels Last Scrape Scrape Duration Error
https://192.168.5.101:6443/metrics
up endpoint="https" instance="192.168.5.101:6443" job="apiserver" namespace="default" service="kubernetes" 19.306s ago 120ms
https://192.168.5.102:6443/metrics
up endpoint="https" instance="192.168.5.102:6443" job="apiserver" namespace="default" service="kubernetes" 1.875s ago 208.3ms
https://192.168.5.103:6443/metrics
up endpoint="https" instance="192.168.5.103:6443" job="apiserver" namespace="default" service="kubernetes" 14.58s ago 186.2ms

monitoring/prometheus-prometheus-oper-coredns/0 (2/2 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://10.244.2.82:9153/metrics
up endpoint="http-metrics" instance="10.244.2.82:9153" job="coredns" namespace="kube-system" pod="coredns-74ff55c5b-v7zk5" service="prometheus-prometheus-oper-coredns" 3.127s ago 11.41ms
http://10.244.2.83:9153/metrics
up endpoint="http-metrics" instance="10.244.2.83:9153" job="coredns" namespace="kube-system" pod="coredns-74ff55c5b-8zmwc" service="prometheus-prometheus-oper-coredns" 23.502s ago 9.336ms

monitoring/prometheus-prometheus-oper-grafana/0 (1/1 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://10.244.0.80:3000/metrics
up endpoint="service" instance="10.244.0.80:3000" job="prometheus-grafana" namespace="monitoring" pod="prometheus-grafana-7bfc4cc467-bqf8x" service="prometheus-grafana" 21.293s ago 7.455ms

monitoring/prometheus-prometheus-oper-kube-controller-manager/0 (0/3 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://192.168.5.101:10252/metrics
down endpoint="http-metrics" instance="192.168.5.101:10252" job="kube-controller-manager" namespace="kube-system" pod="kube-controller-manager-socialboost1" service="prometheus-prometheus-oper-kube-controller-manager" 27.368s ago 859us Get http://192.168.5.101:10252/metrics: dial tcp 192.168.5.101:10252: connect: connection refused
http://192.168.5.102:10252/metrics
down endpoint="http-metrics" instance="192.168.5.102:10252" job="kube-controller-manager" namespace="kube-system" pod="kube-controller-manager-socialboost2" service="prometheus-prometheus-oper-kube-controller-manager" 8.672s ago 1.13ms Get http://192.168.5.102:10252/metrics: dial tcp 192.168.5.102:10252: connect: connection refused
http://192.168.5.103:10252/metrics
down endpoint="http-metrics" instance="192.168.5.103:10252" job="kube-controller-manager" namespace="kube-system" pod="kube-controller-manager-socialboost3" service="prometheus-prometheus-oper-kube-controller-manager" 29.756s ago 847.7us Get http://192.168.5.103:10252/metrics: dial tcp 192.168.5.103:10252: connect: connection refused

monitoring/prometheus-prometheus-oper-kube-etcd/0 (0/3 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://192.168.5.101:2379/metrics
down endpoint="http-metrics" instance="192.168.5.101:2379" job="kube-etcd" namespace="kube-system" pod="etcd-socialboost1" service="prometheus-prometheus-oper-kube-etcd" 28.181s ago 1.239ms Get http://192.168.5.101:2379/metrics: read tcp 10.244.0.42:36218->192.168.5.101:2379: read: connection reset by peer
http://192.168.5.102:2379/metrics
down endpoint="http-metrics" instance="192.168.5.102:2379" job="kube-etcd" namespace="kube-system" pod="etcd-socialboost2" service="prometheus-prometheus-oper-kube-etcd" 12.862s ago 1.19ms Get http://192.168.5.102:2379/metrics: read tcp 10.244.0.42:36206->192.168.5.102:2379: read: connection reset by peer
http://192.168.5.103:2379/metrics
down endpoint="http-metrics" instance="192.168.5.103:2379" job="kube-etcd" namespace="kube-system" pod="etcd-socialboost3" service="prometheus-prometheus-oper-kube-etcd" 28.674s ago 1.281ms Get http://192.168.5.103:2379/metrics: read tcp 10.244.0.42:33032->192.168.5.103:2379: read: connection reset by peer

monitoring/prometheus-prometheus-oper-kube-proxy/0 (0/3 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://192.168.5.101:10249/metrics
down endpoint="http-metrics" instance="192.168.5.101:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-vw25n" service="prometheus-prometheus-oper-kube-proxy" 2.454s ago 1.132ms Get http://192.168.5.101:10249/metrics: dial tcp 192.168.5.101:10249: connect: connection refused
http://192.168.5.102:10249/metrics
down endpoint="http-metrics" instance="192.168.5.102:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-lg2qt" service="prometheus-prometheus-oper-kube-proxy" 5.819s ago 1.091ms Get http://192.168.5.102:10249/metrics: dial tcp 192.168.5.102:10249: connect: connection refused
http://192.168.5.103:10249/metrics
down endpoint="http-metrics" instance="192.168.5.103:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-pqhsr" service="prometheus-prometheus-oper-kube-proxy" 2.247s ago 708.8us Get http://192.168.5.103:10249/metrics: dial tcp 192.168.5.103:10249: connect: connection refused

monitoring/prometheus-prometheus-oper-kube-scheduler/0 (0/3 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://192.168.5.101:10251/metrics
down endpoint="http-metrics" instance="192.168.5.101:10251" job="kube-scheduler" namespace="kube-system" pod="kube-scheduler-socialboost1" service="prometheus-prometheus-oper-kube-scheduler" 13.552s ago 952.7us Get http://192.168.5.101:10251/metrics: dial tcp 192.168.5.101:10251: connect: connection refused
http://192.168.5.102:10251/metrics
down endpoint="http-metrics" instance="192.168.5.102:10251" job="kube-scheduler" namespace="kube-system" pod="kube-scheduler-socialboost2" service="prometheus-prometheus-oper-kube-scheduler" 8.822s ago 922.5us Get http://192.168.5.102:10251/metrics: dial tcp 192.168.5.102:10251: connect: connection refused
http://192.168.5.103:10251/metrics
down endpoint="http-metrics" instance="192.168.5.103:10251" job="kube-scheduler" namespace="kube-system" pod="kube-scheduler-socialboost3" service="prometheus-prometheus-oper-kube-scheduler" 17.559s ago 1.036ms Get http://192.168.5.103:10251/metrics: dial tcp 192.168.5.103:10251: connect: connection refused

monitoring/prometheus-prometheus-oper-kube-state-metrics/0 (1/1 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://10.244.1.48:8080/metrics
up endpoint="http" instance="10.244.1.48:8080" job="kube-state-metrics" namespace="monitoring" pod="prometheus-kube-state-metrics-58fbd9f8ff-rvcg7" service="prometheus-kube-state-metrics" 5.673s ago 110.5ms

monitoring/prometheus-prometheus-oper-kubelet/0 (3/3 up)

Endpoint State Labels Last Scrape Scrape Duration Error
https://192.168.5.101:10250/metrics
up endpoint="https-metrics" instance="192.168.5.101:10250" job="kubelet" namespace="kube-system" node="socialboost1" service="prometheus-prometheus-oper-kubelet" 1.716s ago 24.72ms
https://192.168.5.102:10250/metrics
up endpoint="https-metrics" instance="192.168.5.102:10250" job="kubelet" namespace="kube-system" node="socialboost2" service="prometheus-prometheus-oper-kubelet" 2.355s ago 42.38ms
https://192.168.5.103:10250/metrics
up endpoint="https-metrics" instance="192.168.5.103:10250" job="kubelet" namespace="kube-system" node="socialboost3" service="prometheus-prometheus-oper-kubelet" 27.217s ago 36.3ms

monitoring/prometheus-prometheus-oper-kubelet/1 (3/3 up)

Endpoint State Labels Last Scrape Scrape Duration Error
https://192.168.5.101:10250/metrics/cadvisor
up endpoint="https-metrics" instance="192.168.5.101:10250" job="kubelet" namespace="kube-system" node="socialboost1" service="prometheus-prometheus-oper-kubelet" 27.525s ago 243.3ms
https://192.168.5.102:10250/metrics/cadvisor
up endpoint="https-metrics" instance="192.168.5.102:10250" job="kubelet" namespace="kube-system" node="socialboost2" service="prometheus-prometheus-oper-kubelet" 3.311s ago 656.4ms
https://192.168.5.103:10250/metrics/cadvisor
up endpoint="https-metrics" instance="192.168.5.103:10250" job="kubelet" namespace="kube-system" node="socialboost3" service="prometheus-prometheus-oper-kubelet" 20.307s ago 680.1ms

monitoring/prometheus-prometheus-oper-node-exporter/0 (3/3 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://192.168.5.101:9100/metrics
up endpoint="metrics" instance="192.168.5.101:9100" job="node-exporter" namespace="monitoring" pod="prometheus-prometheus-node-exporter-92djt" service="prometheus-prometheus-node-exporter" 11.507s ago 70.72ms
http://192.168.5.102:9100/metrics
up endpoint="metrics" instance="192.168.5.102:9100" job="node-exporter" namespace="monitoring" pod="prometheus-prometheus-node-exporter-vftnn" service="prometheus-prometheus-node-exporter" 9.929s ago 117.5ms
http://192.168.5.103:9100/metrics
up endpoint="metrics" instance="192.168.5.103:9100" job="node-exporter" namespace="monitoring" pod="prometheus-prometheus-node-exporter-qbgj7" service="prometheus-prometheus-node-exporter" 2.33s ago 122.1ms

monitoring/prometheus-prometheus-oper-operator/0 (1/1 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://10.244.0.55:8080/metrics
up endpoint="http" instance="10.244.0.55:8080" job="prometheus-prometheus-oper-operator" namespace="monitoring" pod="prometheus-prometheus-oper-operator-858c5646f6-66fjg" service="prometheus-prometheus-oper-operator" 16.83s ago 5.639ms

monitoring/prometheus-prometheus-oper-prometheus/0 (1/1 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://10.244.0.42:9090/metrics
up endpoint="web" instance="10.244.0.42:9090" job="prometheus-prometheus-oper-prometheus" namespace="monitoring" pod="prometheus-prometheus-prometheus-oper-prometheus-0" service="prometheus-prometheus-oper-prometheus" 61ms ago 26.21ms