| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://192.168.5.101:9283/metrics | down | endpoint="metrics" instance="192.168.5.101:9283" job="ceph-metrics" namespace="monitoring" service="ceph-metrics" | 1.908s ago | 794.8us | Get http://192.168.5.101:9283/metrics: dial tcp 192.168.5.101:9283: connect: connection refused |
| http://192.168.5.102:9283/metrics | down | endpoint="metrics" instance="192.168.5.102:9283" job="ceph-metrics" namespace="monitoring" service="ceph-metrics" | 2.258s ago | 829.2us | Get http://192.168.5.102:9283/metrics: dial tcp 192.168.5.102:9283: connect: connection refused |
| http://192.168.5.103:9283/metrics | down | endpoint="metrics" instance="192.168.5.103:9283" job="ceph-metrics" namespace="monitoring" service="ceph-metrics" | 6.936s ago | 762.1us | Get http://192.168.5.103:9283/metrics: dial tcp 192.168.5.103:9283: connect: connection refused |
| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://10.244.0.178:9093/metrics | up | endpoint="web" instance="10.244.0.178:9093" job="prometheus-prometheus-oper-alertmanager" namespace="monitoring" pod="alertmanager-prometheus-prometheus-oper-alertmanager-0" service="prometheus-prometheus-oper-alertmanager" | 24.988s ago | 5.786ms | |
| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| https://192.168.5.101:6443/metrics | up | endpoint="https" instance="192.168.5.101:6443" job="apiserver" namespace="default" service="kubernetes" | 24.521s ago | 142.7ms | |
| https://192.168.5.102:6443/metrics | up | endpoint="https" instance="192.168.5.102:6443" job="apiserver" namespace="default" service="kubernetes" | 7.094s ago | 131.2ms | |
| https://192.168.5.103:6443/metrics | up | endpoint="https" instance="192.168.5.103:6443" job="apiserver" namespace="default" service="kubernetes" | 19.795s ago | 226.6ms | |
| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://10.244.0.19:9153/metrics | up | endpoint="http-metrics" instance="10.244.0.19:9153" job="coredns" namespace="kube-system" pod="coredns-7f55f5dc96-t6wfr" service="prometheus-prometheus-oper-coredns" | 679ms ago | 9.078ms | |
| http://10.244.1.182:9153/metrics | up | endpoint="http-metrics" instance="10.244.1.182:9153" job="coredns" namespace="kube-system" pod="coredns-7f55f5dc96-npl79" service="prometheus-prometheus-oper-coredns" | 12.859s ago | 7.137ms | |
| http://10.244.2.243:9153/metrics | up | endpoint="http-metrics" instance="10.244.2.243:9153" job="coredns" namespace="kube-system" pod="coredns-7f55f5dc96-gwxpt" service="prometheus-prometheus-oper-coredns" | 27.273s ago | 9.721ms | |
| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://10.244.2.161:3000/metrics | up | endpoint="service" instance="10.244.2.161:3000" job="prometheus-grafana" namespace="monitoring" pod="prometheus-grafana-7bfc4cc467-tgbxg" service="prometheus-grafana" | 25.868s ago | 12.24ms | |
| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://192.168.5.101:10252/metrics | down | endpoint="http-metrics" instance="192.168.5.101:10252" job="kube-controller-manager" namespace="kube-system" pod="kube-controller-manager-socialboost1" service="prometheus-prometheus-oper-kube-controller-manager" | 2.584s ago | 904.9us | Get http://192.168.5.101:10252/metrics: dial tcp 192.168.5.101:10252: connect: connection refused |
| http://192.168.5.102:10252/metrics | down | endpoint="http-metrics" instance="192.168.5.102:10252" job="kube-controller-manager" namespace="kube-system" pod="kube-controller-manager-socialboost2" service="prometheus-prometheus-oper-kube-controller-manager" | 13.888s ago | 1.107ms | Get http://192.168.5.102:10252/metrics: dial tcp 192.168.5.102:10252: connect: connection refused |
| http://192.168.5.103:10252/metrics | down | endpoint="http-metrics" instance="192.168.5.103:10252" job="kube-controller-manager" namespace="kube-system" pod="kube-controller-manager-socialboost3" service="prometheus-prometheus-oper-kube-controller-manager" | 4.971s ago | 1.056ms | Get http://192.168.5.103:10252/metrics: dial tcp 192.168.5.103:10252: connect: connection refused |
| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://192.168.5.101:2379/metrics | down | endpoint="http-metrics" instance="192.168.5.101:2379" job="kube-etcd" namespace="kube-system" pod="etcd-socialboost1" service="prometheus-prometheus-oper-kube-etcd" | 3.397s ago | 1.606ms | Get http://192.168.5.101:2379/metrics: read tcp 10.244.0.221:60530->192.168.5.101:2379: read: connection reset by peer |
| http://192.168.5.102:2379/metrics | down | endpoint="http-metrics" instance="192.168.5.102:2379" job="kube-etcd" namespace="kube-system" pod="etcd-socialboost2" service="prometheus-prometheus-oper-kube-etcd" | 18.078s ago | 1.596ms | Get http://192.168.5.102:2379/metrics: read tcp 10.244.0.221:40494->192.168.5.102:2379: read: connection reset by peer |
| http://192.168.5.103:2379/metrics | down | endpoint="http-metrics" instance="192.168.5.103:2379" job="kube-etcd" namespace="kube-system" pod="etcd-socialboost3" service="prometheus-prometheus-oper-kube-etcd" | 3.889s ago | 1.395ms | Get http://192.168.5.103:2379/metrics: read tcp 10.244.0.221:59470->192.168.5.103:2379: read: connection reset by peer |
| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://192.168.5.101:10249/metrics | down | endpoint="http-metrics" instance="192.168.5.101:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-vw25n" service="prometheus-prometheus-oper-kube-proxy" | 7.669s ago | 1.216ms | Get http://192.168.5.101:10249/metrics: dial tcp 192.168.5.101:10249: connect: connection refused |
| http://192.168.5.102:10249/metrics | down | endpoint="http-metrics" instance="192.168.5.102:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-lg2qt" service="prometheus-prometheus-oper-kube-proxy" | 11.034s ago | 1.003ms | Get http://192.168.5.102:10249/metrics: dial tcp 192.168.5.102:10249: connect: connection refused |
| http://192.168.5.103:10249/metrics | down | endpoint="http-metrics" instance="192.168.5.103:10249" job="kube-proxy" namespace="kube-system" pod="kube-proxy-pqhsr" service="prometheus-prometheus-oper-kube-proxy" | 7.462s ago | 772.3us | Get http://192.168.5.103:10249/metrics: dial tcp 192.168.5.103:10249: connect: connection refused |
| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://192.168.5.101:10251/metrics | down | endpoint="http-metrics" instance="192.168.5.101:10251" job="kube-scheduler" namespace="kube-system" pod="kube-scheduler-socialboost1" service="prometheus-prometheus-oper-kube-scheduler" | 18.767s ago | 1.011ms | Get http://192.168.5.101:10251/metrics: dial tcp 192.168.5.101:10251: connect: connection refused |
| http://192.168.5.102:10251/metrics | down | endpoint="http-metrics" instance="192.168.5.102:10251" job="kube-scheduler" namespace="kube-system" pod="kube-scheduler-socialboost2" service="prometheus-prometheus-oper-kube-scheduler" | 14.037s ago | 1.09ms | Get http://192.168.5.102:10251/metrics: dial tcp 192.168.5.102:10251: connect: connection refused |
| http://192.168.5.103:10251/metrics | down | endpoint="http-metrics" instance="192.168.5.103:10251" job="kube-scheduler" namespace="kube-system" pod="kube-scheduler-socialboost3" service="prometheus-prometheus-oper-kube-scheduler" | 22.773s ago | 817.7us | Get http://192.168.5.103:10251/metrics: dial tcp 192.168.5.103:10251: connect: connection refused |
| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://10.244.1.44:8080/metrics | up | endpoint="http" instance="10.244.1.44:8080" job="kube-state-metrics" namespace="monitoring" pod="prometheus-kube-state-metrics-58fbd9f8ff-rvcg7" service="prometheus-kube-state-metrics" | 1.968s ago | 105.2ms | |
| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| https://192.168.5.101:10250/metrics | up | endpoint="https-metrics" instance="192.168.5.101:10250" job="kubelet" namespace="kube-system" node="socialboost1" service="prometheus-prometheus-oper-kubelet" | 6.931s ago | 50.25ms | |
| https://192.168.5.102:10250/metrics | up | endpoint="https-metrics" instance="192.168.5.102:10250" job="kubelet" namespace="kube-system" node="socialboost2" service="prometheus-prometheus-oper-kubelet" | 7.57s ago | 49.19ms | |
| https://192.168.5.103:10250/metrics | up | endpoint="https-metrics" instance="192.168.5.103:10250" job="kubelet" namespace="kube-system" node="socialboost3" service="prometheus-prometheus-oper-kubelet" | 2.432s ago | 102.4ms | |
| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| https://192.168.5.101:10250/metrics/cadvisor | up | endpoint="https-metrics" instance="192.168.5.101:10250" job="kubelet" namespace="kube-system" node="socialboost1" service="prometheus-prometheus-oper-kubelet" | 2.74s ago | 337ms | |
| https://192.168.5.102:10250/metrics/cadvisor | up | endpoint="https-metrics" instance="192.168.5.102:10250" job="kubelet" namespace="kube-system" node="socialboost2" service="prometheus-prometheus-oper-kubelet" | 8.526s ago | 543ms | |
| https://192.168.5.103:10250/metrics/cadvisor | up | endpoint="https-metrics" instance="192.168.5.103:10250" job="kubelet" namespace="kube-system" node="socialboost3" service="prometheus-prometheus-oper-kubelet" | 25.522s ago | 696.1ms | |
| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://192.168.5.101:9100/metrics | up | endpoint="metrics" instance="192.168.5.101:9100" job="node-exporter" namespace="monitoring" pod="prometheus-prometheus-node-exporter-92djt" service="prometheus-prometheus-node-exporter" | 16.723s ago | 73.45ms | |
| http://192.168.5.102:9100/metrics | up | endpoint="metrics" instance="192.168.5.102:9100" job="node-exporter" namespace="monitoring" pod="prometheus-prometheus-node-exporter-vftnn" service="prometheus-prometheus-node-exporter" | 15.146s ago | 104.5ms | |
| http://192.168.5.103:9100/metrics | up | endpoint="metrics" instance="192.168.5.103:9100" job="node-exporter" namespace="monitoring" pod="prometheus-prometheus-node-exporter-qbgj7" service="prometheus-prometheus-node-exporter" | 7.548s ago | 127.8ms | |
| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://10.244.0.199:8080/metrics | up | endpoint="http" instance="10.244.0.199:8080" job="prometheus-prometheus-oper-operator" namespace="monitoring" pod="prometheus-prometheus-oper-operator-858c5646f6-66fjg" service="prometheus-prometheus-oper-operator" | 12.469s ago | 5.379ms | |
| Endpoint | State | Labels | Last Scrape | Scrape Duration | Error |
|---|---|---|---|---|---|
| http://10.244.0.221:9090/metrics | up | endpoint="web" instance="10.244.0.221:9090" job="prometheus-prometheus-oper-prometheus" namespace="monitoring" pod="prometheus-prometheus-prometheus-oper-prometheus-0" service="prometheus-prometheus-oper-prometheus" | 6.365s ago | 20.3ms | |