Targets


monitoring/ceph-metrics-sm/0 (0/3 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://192.168.5.101:9283/metrics
down endpoint="metrics" instance="192.168.5.101:9283" job="ceph-metrics" namespace="monitoring" service="ceph-metrics" 709ms ago 959.1us Get http://192.168.5.101:9283/metrics: dial tcp 192.168.5.101:9283: connect: connection refused
http://192.168.5.102:9283/metrics
down endpoint="metrics" instance="192.168.5.102:9283" job="ceph-metrics" namespace="monitoring" service="ceph-metrics" 1.059s ago 916.8us Get http://192.168.5.102:9283/metrics: dial tcp 192.168.5.102:9283: connect: connection refused
http://192.168.5.103:9283/metrics
down endpoint="metrics" instance="192.168.5.103:9283" job="ceph-metrics" namespace="monitoring" service="ceph-metrics" 5.737s ago 910.2us Get http://192.168.5.103:9283/metrics: dial tcp 192.168.5.103:9283: connect: connection refused

monitoring/prometheus-prometheus-oper-alertmanager/0 (1/1 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://10.244.0.178:9093/metrics
up endpoint="web" instance="10.244.0.178:9093" job="prometheus-prometheus-oper-alertmanager" namespace="monitoring" pod="alertmanager-prometheus-prometheus-oper-alertmanager-0" service="prometheus-prometheus-oper-alertmanager" 13.789s ago 5.652ms

monitoring/prometheus-prometheus-oper-apiserver/0 (3/3 up)

Endpoint State Labels Last Scrape Scrape Duration Error
https://192.168.5.101:6443/metrics
up endpoint="https" instance="192.168.5.101:6443" job="apiserver" namespace="default" service="kubernetes" 13.321s ago 125.1ms
https://192.168.5.102:6443/metrics
up endpoint="https" instance="192.168.5.102:6443" job="apiserver" namespace="default" service="kubernetes" 25.894s ago 186.5ms
https://192.168.5.103:6443/metrics
up endpoint="https" instance="192.168.5.103:6443" job="apiserver" namespace="default" service="kubernetes" 8.595s ago 189.3ms

monitoring/prometheus-prometheus-oper-coredns/0 (2/2 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://10.244.2.167:9153/metrics
up endpoint="http-metrics" instance="10.244.2.167:9153" job="coredns" namespace="kube-system" pod="coredns-74ff55c5b-v7zk5" service="prometheus-prometheus-oper-coredns" 22.342s ago 9.465ms
http://10.244.2.219:9153/metrics
up endpoint="http-metrics" instance="10.244.2.219:9153" job="coredns" namespace="kube-system" pod="coredns-74ff55c5b-8zmwc" service="prometheus-prometheus-oper-coredns" 20.587s ago 10.28ms

monitoring/prometheus-prometheus-oper-grafana/0 (1/1 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://10.244.2.161:3000/metrics
up endpoint="service" instance="10.244.2.161:3000" job="prometheus-grafana" namespace="monitoring" pod="prometheus-grafana-7bfc4cc467-tgbxg" service="prometheus-grafana" 14.667s ago 7.292ms

monitoring/prometheus-prometheus-oper-kube-controller-manager/0 (0/3 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://192.168.5.101:10252/metrics
down endpoint="http-metrics" instance="192.168.5.101:10252" job="kube-controller-manager" namespace="kube-system" pod="kube-controller-manager-socialboost1" service="prometheus-prometheus-oper-kube-controller-manager" 21.383s ago 892.5us Get http://192.168.5.101:10252/metrics: dial tcp 192.168.5.101:10252: connect: connection refused
http://192.168.5.102:10252/metrics
down endpoint="http-metrics" instance="192.168.5.102:10252" job="kube-controller-manager" namespace="kube-system" pod="kube-controller-manager-socialboost2" service="prometheus-prometheus-oper-kube-controller-manager" 2.687s ago 1.082ms Get http://192.168.5.102:10252/metrics: dial tcp 192.168.5.102:10252: connect: connection refused
http://192.168.5.103:10252/metrics
down endpoint="http-metrics" instance="192.168.5.103:10252" job="kube-controller-manager" namespace="kube-system" pod="kube-controller-manager-socialboost3" service="prometheus-prometheus-oper-kube-controller-manager" 23.77s ago 930.2us Get http://192.168.5.103:10252/metrics: dial tcp 192.168.5.103:10252: connect: connection refused

monitoring/prometheus-prometheus-oper-kube-etcd/0 (0/3 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://192.168.5.101:2379/metrics
down endpoint="http-metrics" instance="192.168.5.101:2379" job="kube-etcd" namespace="kube-system" pod="etcd-socialboost1" service="prometheus-prometheus-oper-kube-etcd" 22.195s ago 1.307ms Get http://192.168.5.101:2379/metrics: read tcp 10.244.0.221:56006->192.168.5.101:2379: read: connection reset by peer
http://192.168.5.102:2379/metrics
down endpoint="http-metrics" instance="192.168.5.102:2379" job="kube-etcd" namespace="kube-system" pod="etcd-socialboost2" service="prometheus-prometheus-oper-kube-etcd" 6.875s ago 1.54ms Get http://192.168.5.102:2379/metrics: read tcp 10.244.0.221:60396->192.168.5.102:2379: read: connection reset by peer
http://192.168.5.103:2379/metrics
down endpoint="http-metrics" instance="192.168.5.103:2379" job="kube-etcd" namespace="kube-system" pod="etcd-socialboost3" service="prometheus-prometheus-oper-kube-etcd" 22.687s ago 1.582ms Get http://192.168.5.103:2379/metrics: read tcp 10.244.0.221:54062->192.168.5.103:2379: read: connection reset by peer

monitoring/prometheus-prometheus-oper-kube-proxy/0 (0/3 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://192.168.5.101:10249/metrics
down endpoint="http-metrics" instance="192.168.5.101:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-vw25n" service="prometheus-prometheus-oper-kube-proxy" 26.467s ago 1.344ms Get http://192.168.5.101:10249/metrics: dial tcp 192.168.5.101:10249: connect: connection refused
http://192.168.5.102:10249/metrics
down endpoint="http-metrics" instance="192.168.5.102:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-lg2qt" service="prometheus-prometheus-oper-kube-proxy" 29.831s ago 1.069ms Get http://192.168.5.102:10249/metrics: dial tcp 192.168.5.102:10249: connect: connection refused
http://192.168.5.103:10249/metrics
down endpoint="http-metrics" instance="192.168.5.103:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-pqhsr" service="prometheus-prometheus-oper-kube-proxy" 26.259s ago 771.1us Get http://192.168.5.103:10249/metrics: dial tcp 192.168.5.103:10249: connect: connection refused

monitoring/prometheus-prometheus-oper-kube-scheduler/0 (0/3 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://192.168.5.101:10251/metrics
down endpoint="http-metrics" instance="192.168.5.101:10251" job="kube-scheduler" namespace="kube-system" pod="kube-scheduler-socialboost1" service="prometheus-prometheus-oper-kube-scheduler" 7.563s ago 1.146ms Get http://192.168.5.101:10251/metrics: dial tcp 192.168.5.101:10251: connect: connection refused
http://192.168.5.102:10251/metrics
down endpoint="http-metrics" instance="192.168.5.102:10251" job="kube-scheduler" namespace="kube-system" pod="kube-scheduler-socialboost2" service="prometheus-prometheus-oper-kube-scheduler" 2.833s ago 892.9us Get http://192.168.5.102:10251/metrics: dial tcp 192.168.5.102:10251: connect: connection refused
http://192.168.5.103:10251/metrics
down endpoint="http-metrics" instance="192.168.5.103:10251" job="kube-scheduler" namespace="kube-system" pod="kube-scheduler-socialboost3" service="prometheus-prometheus-oper-kube-scheduler" 11.569s ago 991.8us Get http://192.168.5.103:10251/metrics: dial tcp 192.168.5.103:10251: connect: connection refused

monitoring/prometheus-prometheus-oper-kube-state-metrics/0 (1/1 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://10.244.1.44:8080/metrics
up endpoint="http" instance="10.244.1.44:8080" job="kube-state-metrics" namespace="monitoring" pod="prometheus-kube-state-metrics-58fbd9f8ff-rvcg7" service="prometheus-kube-state-metrics" 20.763s ago 103.8ms

monitoring/prometheus-prometheus-oper-kubelet/0 (3/3 up)

Endpoint State Labels Last Scrape Scrape Duration Error
https://192.168.5.101:10250/metrics
up endpoint="https-metrics" instance="192.168.5.101:10250" job="kubelet" namespace="kube-system" node="socialboost1" service="prometheus-prometheus-oper-kubelet" 25.725s ago 28.52ms
https://192.168.5.102:10250/metrics
up endpoint="https-metrics" instance="192.168.5.102:10250" job="kubelet" namespace="kube-system" node="socialboost2" service="prometheus-prometheus-oper-kubelet" 26.365s ago 52.02ms
https://192.168.5.103:10250/metrics
up endpoint="https-metrics" instance="192.168.5.103:10250" job="kubelet" namespace="kube-system" node="socialboost3" service="prometheus-prometheus-oper-kubelet" 21.226s ago 118.3ms

monitoring/prometheus-prometheus-oper-kubelet/1 (3/3 up)

Endpoint State Labels Last Scrape Scrape Duration Error
https://192.168.5.101:10250/metrics/cadvisor
up endpoint="https-metrics" instance="192.168.5.101:10250" job="kubelet" namespace="kube-system" node="socialboost1" service="prometheus-prometheus-oper-kubelet" 21.534s ago 274.9ms
https://192.168.5.102:10250/metrics/cadvisor
up endpoint="https-metrics" instance="192.168.5.102:10250" job="kubelet" namespace="kube-system" node="socialboost2" service="prometheus-prometheus-oper-kubelet" 27.32s ago 581.2ms
https://192.168.5.103:10250/metrics/cadvisor
up endpoint="https-metrics" instance="192.168.5.103:10250" job="kubelet" namespace="kube-system" node="socialboost3" service="prometheus-prometheus-oper-kubelet" 14.316s ago 749.6ms

monitoring/prometheus-prometheus-oper-node-exporter/0 (3/3 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://192.168.5.101:9100/metrics
up endpoint="metrics" instance="192.168.5.101:9100" job="node-exporter" namespace="monitoring" pod="prometheus-prometheus-node-exporter-92djt" service="prometheus-prometheus-node-exporter" 5.516s ago 66.13ms
http://192.168.5.102:9100/metrics
up endpoint="metrics" instance="192.168.5.102:9100" job="node-exporter" namespace="monitoring" pod="prometheus-prometheus-node-exporter-vftnn" service="prometheus-prometheus-node-exporter" 3.938s ago 110.2ms
http://192.168.5.103:9100/metrics
up endpoint="metrics" instance="192.168.5.103:9100" job="node-exporter" namespace="monitoring" pod="prometheus-prometheus-node-exporter-qbgj7" service="prometheus-prometheus-node-exporter" 26.34s ago 121.8ms

monitoring/prometheus-prometheus-oper-operator/0 (1/1 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://10.244.0.199:8080/metrics
up endpoint="http" instance="10.244.0.199:8080" job="prometheus-prometheus-oper-operator" namespace="monitoring" pod="prometheus-prometheus-oper-operator-858c5646f6-66fjg" service="prometheus-prometheus-oper-operator" 1.26s ago 3.351ms

monitoring/prometheus-prometheus-oper-prometheus/0 (1/1 up)

Endpoint State Labels Last Scrape Scrape Duration Error
http://10.244.0.221:9090/metrics
up endpoint="web" instance="10.244.0.221:9090" job="prometheus-prometheus-oper-prometheus" namespace="monitoring" pod="prometheus-prometheus-prometheus-oper-prometheus-0" service="prometheus-prometheus-oper-prometheus" 25.156s ago 20.38ms