Alerts

/etc/prometheus/rules/prometheus-prometheus-prometheus-oper-prometheus-rulefiles-0/monitoring-prometheus-prometheus-oper-alertmanager.rules.yaml > alertmanager.rules
AlertmanagerConfigInconsistent (0 active)
AlertmanagerFailedReload (0 active)
alert: AlertmanagerFailedReload
expr: alertmanager_config_last_reload_successful{job="prometheus-prometheus-oper-alertmanager",namespace="monitoring"}
  == 0
for: 10m
labels:
  severity: warning
annotations:
  message: Reloading Alertmanager's configuration has failed for {{ $labels.namespace
    }}/{{ $labels.pod}}.
AlertmanagerMembersInconsistent (0 active)
/etc/prometheus/rules/prometheus-prometheus-prometheus-oper-prometheus-rulefiles-0/monitoring-prometheus-prometheus-oper-etcd.yaml > etcd
etcdInsufficientMembers (1 active)
alert: etcdInsufficientMembers
expr: sum
  by(job) (up{job=~".*etcd.*"} == bool 1) < ((count by(job) (up{job=~".*etcd.*"})
  + 1) / 2)
for: 3m
labels:
  severity: critical
annotations:
  message: 'etcd cluster "{{ $labels.job }}": insufficient members ({{ $value
    }}).'
Labels State Active Since Value
alertname="etcdInsufficientMembers" job="kube-etcd" severity="critical" firing 2025-10-04 07:24:42.582998871 +0000 UTC 0
etcdGRPCRequestsSlow (0 active)
alert: etcdGRPCRequestsSlow
expr: histogram_quantile(0.99,
  sum by(job, instance, grpc_service, grpc_method, le) (rate(grpc_server_handling_seconds_bucket{grpc_type="unary",job=~".*etcd.*"}[5m])))
  > 0.15
for: 10m
labels:
  severity: critical
annotations:
  message: 'etcd cluster "{{ $labels.job }}": gRPC requests to {{ $labels.grpc_method
    }} are taking {{ $value }}s on etcd instance {{ $labels.instance }}.'
etcdHTTPRequestsSlow (0 active)
alert: etcdHTTPRequestsSlow
expr: histogram_quantile(0.99,
  rate(etcd_http_successful_duration_seconds_bucket[5m])) > 0.15
for: 10m
labels:
  severity: warning
annotations:
  message: etcd instance {{ $labels.instance }} HTTP requests to {{ $labels.method
    }} are slow.
etcdHighCommitDurations (0 active)
alert: etcdHighCommitDurations
expr: histogram_quantile(0.99,
  rate(etcd_disk_backend_commit_duration_seconds_bucket{job=~".*etcd.*"}[5m]))
  > 0.25
for: 10m
labels:
  severity: warning
annotations:
  message: 'etcd cluster "{{ $labels.job }}": 99th percentile commit durations
    {{ $value }}s on etcd instance {{ $labels.instance }}.'
etcdHighFsyncDurations (0 active)
alert: etcdHighFsyncDurations
expr: histogram_quantile(0.99,
  rate(etcd_disk_wal_fsync_duration_seconds_bucket{job=~".*etcd.*"}[5m]))
  > 0.5
for: 10m
labels:
  severity: warning
annotations:
  message: 'etcd cluster "{{ $labels.job }}": 99th percentile fync durations
    are {{ $value }}s on etcd instance {{ $labels.instance }}.'
etcdHighNumberOfFailedGRPCRequests (0 active)
alert: etcdHighNumberOfFailedGRPCRequests
expr: 100
  * sum by(job, instance, grpc_service, grpc_method) (rate(grpc_server_handled_total{grpc_code!="OK",job=~".*etcd.*"}[5m]))
  / sum by(job, instance, grpc_service, grpc_method) (rate(grpc_server_handled_total{job=~".*etcd.*"}[5m]))
  > 5
for: 5m
labels:
  severity: critical
annotations:
  message: 'etcd cluster "{{ $labels.job }}": {{ $value }}% of requests for
    {{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}.'
etcdHighNumberOfFailedGRPCRequests (0 active)
alert: etcdHighNumberOfFailedGRPCRequests
expr: 100
  * sum by(job, instance, grpc_service, grpc_method) (rate(grpc_server_handled_total{grpc_code!="OK",job=~".*etcd.*"}[5m]))
  / sum by(job, instance, grpc_service, grpc_method) (rate(grpc_server_handled_total{job=~".*etcd.*"}[5m]))
  > 1
for: 10m
labels:
  severity: warning
annotations:
  message: 'etcd cluster "{{ $labels.job }}": {{ $value }}% of requests for
    {{ $labels.grpc_method }} failed on etcd instance {{ $labels.instance }}.'
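Both etcdHighNumberOfFailedGRPCRequests variants compute the same quantity, the percentage of non-OK gRPC responses per job, instance, service and method; only the threshold and hold time differ (above 5% for 5m is critical, above 1% for 10m is warning). The ratio can be previewed without either threshold by pasting the shared part of the expression into the expression browser:
  100
    * sum by(job, instance, grpc_service, grpc_method) (rate(grpc_server_handled_total{grpc_code!="OK",job=~".*etcd.*"}[5m]))
    / sum by(job, instance, grpc_service, grpc_method) (rate(grpc_server_handled_total{job=~".*etcd.*"}[5m]))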
etcdHighNumberOfFailedHTTPRequests (0 active)
alert: etcdHighNumberOfFailedHTTPRequests
expr: sum
  by(method) (rate(etcd_http_failed_total{code!="404",job=~".*etcd.*"}[5m]))
  / sum by(method) (rate(etcd_http_received_total{job=~".*etcd.*"}[5m])) >
  0.05
for: 10m
labels:
  severity: critical
annotations:
  message: '{{ $value }}% of requests for {{ $labels.method }} failed on etcd instance
    {{ $labels.instance }}.'
etcdHighNumberOfFailedHTTPRequests (0 active)
alert: etcdHighNumberOfFailedHTTPRequests
expr: sum
  by(method) (rate(etcd_http_failed_total{code!="404",job=~".*etcd.*"}[5m]))
  / sum by(method) (rate(etcd_http_received_total{job=~".*etcd.*"}[5m])) >
  0.01
for: 10m
labels:
  severity: warning
annotations:
  message: '{{ $value }}% of requests for {{ $labels.method }} failed on etcd instance
    {{ $labels.instance }}.'
etcdHighNumberOfFailedProposals (0 active)
alert: etcdHighNumberOfFailedProposals
expr: rate(etcd_server_proposals_failed_total{job=~".*etcd.*"}[15m])
  > 5
for: 15m
labels:
  severity: warning
annotations:
  message: 'etcd cluster "{{ $labels.job }}": {{ $value }} proposal failures
    within the last hour on etcd instance {{ $labels.instance }}.'
etcdHighNumberOfLeaderChanges (0 active)
alert: etcdHighNumberOfLeaderChanges
expr: rate(etcd_server_leader_changes_seen_total{job=~".*etcd.*"}[15m])
  > 3
for: 15m
labels:
  severity: warning
annotations:
  message: 'etcd cluster "{{ $labels.job }}": instance {{ $labels.instance
    }} has seen {{ $value }} leader changes within the last hour.'
etcdMemberCommunicationSlow (0 active)
alert: etcdMemberCommunicationSlow
expr: histogram_quantile(0.99,
  rate(etcd_network_peer_round_trip_time_seconds_bucket{job=~".*etcd.*"}[5m]))
  > 0.15
for: 10m
labels:
  severity: warning
annotations:
  message: 'etcd cluster "{{ $labels.job }}": member communication with {{
    $labels.To }} is taking {{ $value }}s on etcd instance {{ $labels.instance }}.'
etcdNoLeader (0 active)
alert: etcdNoLeader
expr: etcd_server_has_leader{job=~".*etcd.*"}
  == 0
for: 1m
labels:
  severity: critical
annotations:
  message: 'etcd cluster "{{ $labels.job }}": member {{ $labels.instance }}
    has no leader.'
/etc/prometheus/rules/prometheus-prometheus-prometheus-oper-prometheus-rulefiles-0/monitoring-prometheus-prometheus-oper-general.rules.yaml > general.rules
TargetDown (5 active)
alert: TargetDown
expr: 100
  * (count by(job) (up == 0) / count by(job) (up)) > 10
for: 10m
labels:
  severity: warning
annotations:
  message: '{{ $value }}% of the {{ $labels.job }} targets are down.'
Labels State Active Since Value
alertname="TargetDown" job="ceph-metrics" severity="warning" firing 2025-10-04 07:24:26.904694511 +0000 UTC 100
alertname="TargetDown" job="kube-proxy" severity="warning" firing 2025-10-04 07:24:56.904694511 +0000 UTC 100
alertname="TargetDown" job="kube-controller-manager" severity="warning" firing 2025-10-04 07:24:56.904694511 +0000 UTC 100
alertname="TargetDown" job="kube-etcd" severity="warning" firing 2025-10-04 07:24:26.904694511 +0000 UTC 100
alertname="TargetDown" job="kube-scheduler" severity="warning" firing 2025-10-04 07:24:26.904694511 +0000 UTC 100
Watchdog (1 active)
alert: Watchdog
expr: vector(1)
labels:
  severity: none
annotations:
  message: |
    This is an alert meant to ensure that the entire alerting pipeline is functional.
    This alert is always firing; therefore, it should always be firing in Alertmanager
    and always fire against a receiver. There are integrations with various notification
    mechanisms that send a notification when this alert is not firing, for example the
    "DeadMansSnitch" integration in PagerDuty.
Labels State Active Since Value
alertname="Watchdog" severity="none" firing 2025-10-04 07:24:26.904694511 +0000 UTC 1
/etc/prometheus/rules/prometheus-prometheus-prometheus-oper-prometheus-rulefiles-0/monitoring-prometheus-prometheus-oper-kube-prometheus-node-alerting.rules.yaml > kube-prometheus-node-alerting.rules
NodeDiskRunningFull (0 active)
alert: NodeDiskRunningFull
expr: '(node:node_filesystem_usage:
  > 0.85) and (predict_linear(node:node_filesystem_avail:[6h], 3600 * 24) <
  0)'
for: 30m
labels:
  severity: warning
annotations:
  message: Device {{ $labels.device }} on node {{ $labels.instance }} will be full
    within the next 24 hours.
NodeDiskRunningFull (0 active)
alert: NodeDiskRunningFull
expr: '(node:node_filesystem_usage:
  > 0.85) and (predict_linear(node:node_filesystem_avail:[30m], 3600 * 2) <
  0)'
for: 10m
labels:
  severity: critical
annotations:
  message: Device {{ $labels.device }} on node {{ $labels.instance }} will be full
    within the next 2 hours.
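Both NodeDiskRunningFull variants pair a usage threshold with a linear forecast: the alert only fires when usage is already above 85% and predict_linear() over the node:node_filesystem_avail: recording rule extrapolates free space below zero within the stated horizon (24 hours for the warning, 2 hours for the critical rule). The projected value 24 hours out can be inspected on its own with the forecast half of the expression:
  predict_linear(node:node_filesystem_avail:[6h], 3600 * 24)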
/etc/prometheus/rules/prometheus-prometheus-prometheus-oper-prometheus-rulefiles-0/monitoring-prometheus-prometheus-oper-kubernetes-absent.yaml > kubernetes-absent
KubeControllerManagerDown (1 active)
alert: KubeControllerManagerDown
expr: absent(up{job="kube-controller-manager"}
  == 1)
for: 15m
labels:
  severity: critical
annotations:
  message: KubeControllerManager has disappeared from Prometheus target discovery.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecontrollermanagerdown
Labels State Active Since Value
alertname="KubeControllerManagerDown" severity="critical" firing 2025-10-04 07:24:38.65408944 +0000 UTC 1
KubeSchedulerDown (1 active)
alert: KubeSchedulerDown
expr: absent(up{job="kube-scheduler"}
  == 1)
for: 15m
labels:
  severity: critical
annotations:
  message: KubeScheduler has disappeared from Prometheus target discovery.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeschedulerdown
Labels State Active Since Value
alertname="KubeSchedulerDown" severity="critical" firing 2025-10-04 07:24:38.65408944 +0000 UTC 1
AlertmanagerDown (0 active)
alert: AlertmanagerDown
expr: absent(up{job="prometheus-prometheus-oper-alertmanager",namespace="monitoring"}
  == 1)
for: 15m
labels:
  severity: critical
annotations:
  message: Alertmanager has disappeared from Prometheus target discovery.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-alertmanagerdown
KubeAPIDown (0 active)
alert: KubeAPIDown
expr: absent(up{job="apiserver"}
  == 1)
for: 15m
labels:
  severity: critical
annotations:
  message: KubeAPI has disappeared from Prometheus target discovery.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapidown
KubeStateMetricsDown (0 active)
alert: KubeStateMetricsDown
expr: absent(up{job="kube-state-metrics"}
  == 1)
for: 15m
labels:
  severity: critical
annotations:
  message: KubeStateMetrics has disappeared from Prometheus target discovery.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatemetricsdown
KubeletDown (0 active)
alert: KubeletDown
expr: absent(up{job="kubelet"}
  == 1)
for: 15m
labels:
  severity: critical
annotations:
  message: Kubelet has disappeared from Prometheus target discovery.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletdown
NodeExporterDown (0 active)
alert: NodeExporterDown
expr: absent(up{job="node-exporter"}
  == 1)
for: 15m
labels:
  severity: critical
annotations:
  message: NodeExporter has disappeared from Prometheus target discovery.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-nodeexporterdown
PrometheusDown (0 active)
alert: PrometheusDown
expr: absent(up{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}
  == 1)
for: 15m
labels:
  severity: critical
annotations:
  message: Prometheus has disappeared from Prometheus target discovery.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-prometheusdown
PrometheusOperatorDown (0 active)
alert: PrometheusOperatorDown
expr: absent(up{job="prometheus-prometheus-oper-operator",namespace="monitoring"}
  == 1)
for: 15m
labels:
  severity: critical
annotations:
  message: PrometheusOperator has disappeared from Prometheus target discovery.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-prometheusoperatordown
/etc/prometheus/rules/prometheus-prometheus-prometheus-oper-prometheus-rulefiles-0/monitoring-prometheus-prometheus-oper-kubernetes-apps.yaml > kubernetes-apps
KubeJobCompletion (1 active)
alert: KubeJobCompletion
expr: kube_job_spec_completions{job="kube-state-metrics"}
  - kube_job_status_succeeded{job="kube-state-metrics"} > 0
for: 1h
labels:
  severity: warning
annotations:
  message: Job {{ $labels.namespace }}/{{ $labels.job_name }} is taking more than
    one hour to complete.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobcompletion
Labels State Active Since Value
alertname="KubeJobCompletion" endpoint="http" instance="10.244.1.48:8080" job="kube-state-metrics" job_name="gitlab-task-runner-backup-1765065600" namespace="gitlab" pod="prometheus-kube-state-metrics-58fbd9f8ff-rvcg7" service="prometheus-kube-state-metrics" severity="warning" firing 2025-12-07 00:00:44.021059511 +0000 UTC 1
KubeCronJobRunning (0 active)
alert: KubeCronJobRunning
expr: time()
  - kube_cronjob_next_schedule_time{job="kube-state-metrics"} > 3600
for: 1h
labels:
  severity: warning
annotations:
  message: CronJob {{ $labels.namespace }}/{{ $labels.cronjob }} is taking more than
    1h to complete.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecronjobrunning
KubeDaemonSetMisScheduled (0 active)
alert: KubeDaemonSetMisScheduled
expr: kube_daemonset_status_number_misscheduled{job="kube-state-metrics"}
  > 0
for: 10m
labels:
  severity: warning
annotations:
  message: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset
    }} are running where they are not supposed to run.'
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedaemonsetmisscheduled
KubeDaemonSetNotScheduled (0 active)
alert: KubeDaemonSetNotScheduled
expr: kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics"}
  - kube_daemonset_status_current_number_scheduled{job="kube-state-metrics"}
  > 0
for: 10m
labels:
  severity: warning
annotations:
  message: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset
    }} are not scheduled.'
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedaemonsetnotscheduled
KubeDaemonSetRolloutStuck (0 active)
alert: KubeDaemonSetRolloutStuck
expr: kube_daemonset_status_number_ready{job="kube-state-metrics"}
  / kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics"}
  * 100 < 100
for: 15m
labels:
  severity: critical
annotations:
  message: Only {{ $value }}% of the desired Pods of DaemonSet {{ $labels.namespace
    }}/{{ $labels.daemonset }} are scheduled and ready.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedaemonsetrolloutstuck
KubeDeploymentGenerationMismatch (0 active)
alert: KubeDeploymentGenerationMismatch
expr: kube_deployment_status_observed_generation{job="kube-state-metrics"}
  != kube_deployment_metadata_generation{job="kube-state-metrics"}
for: 15m
labels:
  severity: critical
annotations:
  message: Deployment generation for {{ $labels.namespace }}/{{ $labels.deployment
    }} does not match; this indicates that the Deployment has failed but has not been
    rolled back.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedeploymentgenerationmismatch
KubeDeploymentReplicasMismatch (0 active)
alert: KubeDeploymentReplicasMismatch
expr: kube_deployment_spec_replicas{job="kube-state-metrics"}
  != kube_deployment_status_replicas_available{job="kube-state-metrics"}
for: 1h
labels:
  severity: critical
annotations:
  message: Deployment {{ $labels.namespace }}/{{ $labels.deployment }} has not matched
    the expected number of replicas for longer than an hour.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedeploymentreplicasmismatch
KubeJobFailed (0 active)
alert: KubeJobFailed
expr: kube_job_status_failed{job="kube-state-metrics"}
  > 0
for: 1h
labels:
  severity: warning
annotations:
  message: Job {{ $labels.namespace }}/{{ $labels.job_name }} failed to complete.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobfailed
KubePodCrashLooping (0 active)
alert: KubePodCrashLooping
expr: rate(kube_pod_container_status_restarts_total{job="kube-state-metrics"}[15m])
  * 60 * 5 > 0
for: 1h
labels:
  severity: critical
annotations:
  message: Pod {{ $labels.namespace }}/{{ $labels.pod }} ({{ $labels.container }})
    is restarting {{ printf "%.2f" $value }} times / 5 minutes.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepodcrashlooping
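rate() over 15 minutes yields restarts per second, and the * 60 * 5 factor rescales it to restarts per 5 minutes, the unit used in the message; because even a single restart in the window produces a small positive value, it is the for: 1h clause that restricts this alert to pods that keep crashing. Per-pod restart rates can be viewed with the thresholdless expression:
  rate(kube_pod_container_status_restarts_total{job="kube-state-metrics"}[15m]) * 60 * 5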
KubePodNotReady (0 active)
alert: KubePodNotReady
expr: sum
  by(namespace, pod) (kube_pod_status_phase{job="kube-state-metrics",phase=~"Failed|Pending|Unknown"})
  > 0
for: 1h
labels:
  severity: critical
annotations:
  message: Pod {{ $labels.namespace }}/{{ $labels.pod }} has been in a non-ready state
    for longer than an hour.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepodnotready
KubeStatefulSetGenerationMismatch (0 active)
alert: KubeStatefulSetGenerationMismatch
expr: kube_statefulset_status_observed_generation{job="kube-state-metrics"}
  != kube_statefulset_metadata_generation{job="kube-state-metrics"}
for: 15m
labels:
  severity: critical
annotations:
  message: StatefulSet generation for {{ $labels.namespace }}/{{ $labels.statefulset
    }} does not match; this indicates that the StatefulSet has failed but has not
    been rolled back.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatefulsetgenerationmismatch
KubeStatefulSetReplicasMismatch (0 active)
alert: KubeStatefulSetReplicasMismatch
expr: kube_statefulset_status_replicas_ready{job="kube-state-metrics"}
  != kube_statefulset_status_replicas{job="kube-state-metrics"}
for: 15m
labels:
  severity: critical
annotations:
  message: StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} has not matched
    the expected number of replicas for longer than 15 minutes.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatefulsetreplicasmismatch
KubeStatefulSetUpdateNotRolledOut (0 active)
alert: KubeStatefulSetUpdateNotRolledOut
expr: max
  without(revision) (kube_statefulset_status_current_revision{job="kube-state-metrics"}
  unless kube_statefulset_status_update_revision{job="kube-state-metrics"})
  * (kube_statefulset_replicas{job="kube-state-metrics"} != kube_statefulset_status_replicas_updated{job="kube-state-metrics"})
for: 15m
labels:
  severity: critical
annotations:
  message: StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} update has
    not been rolled out.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubestatefulsetupdatenotrolledout
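The first factor (current revision unless update revision) keeps only StatefulSets whose current revision still differs from the target update revision, and the second factor requires that not every replica has been updated; multiplied together they flag an update that has started but is not making progress. The second condition alone lists StatefulSets with pending replica updates:
  kube_statefulset_replicas{job="kube-state-metrics"} != kube_statefulset_status_replicas_updated{job="kube-state-metrics"}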
/etc/prometheus/rules/prometheus-prometheus-prometheus-oper-prometheus-rulefiles-0/monitoring-prometheus-prometheus-oper-kubernetes-resources.yaml > kubernetes-resources
CPUThrottlingHigh (3 active)
alert: CPUThrottlingHigh
expr: 100
  * sum by(container, pod, namespace) (increase(container_cpu_cfs_throttled_periods_total{container!=""}[5m]))
  / sum by(container, pod, namespace) (increase(container_cpu_cfs_periods_total[5m]))
  > 25
for: 15m
labels:
  severity: warning
annotations:
  message: '{{ printf "%0.0f" $value }}% throttling of CPU in namespace {{
    $labels.namespace }} for container {{ $labels.container }} in pod {{ $labels.pod
    }}.'
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-cputhrottlinghigh
Labels State Active Since Value
alertname="CPUThrottlingHigh" container="kube-flannel" namespace="kube-system" pod="kube-flannel-ds-9vx64" severity="warning" firing 2025-10-04 07:24:58.714203701 +0000 UTC 38.73873873873874
alertname="CPUThrottlingHigh" container="kube-flannel" namespace="kube-system" pod="kube-flannel-ds-kjjh4" severity="warning" firing 2025-10-11 19:41:58.714203701 +0000 UTC 44.91869918699187
alertname="CPUThrottlingHigh" container="kube-flannel" namespace="kube-system" pod="kube-flannel-ds-rj8mc" severity="warning" firing 2025-10-04 07:25:28.714203701 +0000 UTC 41.34615384615385
KubeMemOvercommit (1 active)
alert: KubeMemOvercommit
expr: sum(namespace:kube_pod_container_resource_requests_memory_bytes:sum)
  / sum(kube_node_status_allocatable_memory_bytes) > (count(kube_node_status_allocatable_memory_bytes)
  - 1) / count(kube_node_status_allocatable_memory_bytes)
for: 5m
labels:
  severity: warning
annotations:
  message: Cluster has overcommitted memory resource requests for Pods and cannot
    tolerate node failure.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubememovercommit
Labels State Active Since Value
alertname="KubeMemOvercommit" severity="warning" firing 2025-10-11 19:51:28.714203701 +0000 UTC 0.6725526718056776
KubeCPUOvercommit (0 active)
alert: KubeCPUOvercommit
expr: sum(namespace:kube_pod_container_resource_requests_cpu_cores:sum)
  / sum(kube_node_status_allocatable_cpu_cores) > (count(kube_node_status_allocatable_cpu_cores)
  - 1) / count(kube_node_status_allocatable_cpu_cores)
for: 5m
labels:
  severity: warning
annotations:
  message: Cluster has overcommitted CPU resource requests for Pods and cannot tolerate
    node failure.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecpuovercommit
KubeCPUOvercommit (0 active)
alert: KubeCPUOvercommit
expr: sum(kube_resourcequota{job="kube-state-metrics",resource="cpu",type="hard"})
  / sum(kube_node_status_allocatable_cpu_cores) > 1.5
for: 5m
labels:
  severity: warning
annotations:
  message: Cluster has overcommitted CPU resource requests for Namespaces.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecpuovercommit
KubeMemOvercommit (0 active)
alert: KubeMemOvercommit
expr: sum(kube_resourcequota{job="kube-state-metrics",resource="memory",type="hard"})
  / sum(kube_node_status_allocatable_memory_bytes{job="node-exporter"}) >
  1.5
for: 5m
labels:
  severity: warning
annotations:
  message: Cluster has overcommitted memory resource requests for Namespaces.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubememovercommit
KubeQuotaExceeded (0 active)
alert: KubeQuotaExceeded
expr: 100
  * kube_resourcequota{job="kube-state-metrics",type="used"} / ignoring(instance,
  job, type) (kube_resourcequota{job="kube-state-metrics",type="hard"}
  > 0) > 90
for: 15m
labels:
  severity: warning
annotations:
  message: Namespace {{ $labels.namespace }} is using {{ printf "%0.0f" $value
    }}% of its {{ $labels.resource }} quota.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubequotaexceeded
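The ignoring(instance, job, type) clause lets each "used" quota series divide by its matching "hard" quota series even though the two differ in the type label, and the > 0 guard skips quotas without a hard limit, avoiding division by zero. Current usage as a percentage of quota, per namespace and resource, is the expression without the final threshold:
  100 * kube_resourcequota{job="kube-state-metrics",type="used"}
    / ignoring(instance, job, type) (kube_resourcequota{job="kube-state-metrics",type="hard"} > 0)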
/etc/prometheus/rules/prometheus-prometheus-prometheus-oper-prometheus-rulefiles-0/monitoring-prometheus-prometheus-oper-kubernetes-storage.yaml > kubernetes-storage
KubePersistentVolumeFullInFourDays (1 active)
alert: KubePersistentVolumeFullInFourDays
expr: 100
  * (kubelet_volume_stats_available_bytes{job="kubelet"} / kubelet_volume_stats_capacity_bytes{job="kubelet"})
  < 15 and predict_linear(kubelet_volume_stats_available_bytes{job="kubelet"}[6h],
  4 * 24 * 3600) < 0
for: 5m
labels:
  severity: critical
annotations:
  message: Based on recent sampling, the PersistentVolume claimed by {{ $labels.persistentvolumeclaim
    }} in Namespace {{ $labels.namespace }} is expected to fill up within four days.
    Currently {{ printf "%0.2f" $value }}% is available.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepersistentvolumefullinfourdays
Labels State Active Since Value
alertname="KubePersistentVolumeFullInFourDays" endpoint="https-metrics" instance="192.168.5.103:10250" job="kubelet" namespace="socialboost" node="socialboost3" persistentvolumeclaim="jira-data" service="prometheus-prometheus-oper-kubelet" severity="critical" firing 2025-12-10 06:19:38.880689489 +0000 UTC 0.05364516213272585
KubePersistentVolumeUsageCritical (2 active)
alert: KubePersistentVolumeUsageCritical
expr: 100
  * kubelet_volume_stats_available_bytes{job="kubelet"} / kubelet_volume_stats_capacity_bytes{job="kubelet"}
  < 3
for: 1m
labels:
  severity: critical
annotations:
  message: The PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in
    Namespace {{ $labels.namespace }} is only {{ printf "%0.2f" $value }}%
    free.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepersistentvolumeusagecritical
Labels State Active Since Value
alertname="KubePersistentVolumeUsageCritical" endpoint="https-metrics" instance="192.168.5.103:10250" job="kubelet" namespace="socialboost" node="socialboost3" persistentvolumeclaim="jira-data" service="prometheus-prometheus-oper-kubelet" severity="critical" firing 2025-10-04 09:56:38.880689489 +0000 UTC 0.05364516213272585
alertname="KubePersistentVolumeUsageCritical" endpoint="https-metrics" instance="192.168.5.103:10250" job="kubelet" namespace="default" node="socialboost3" persistentvolumeclaim="registry-docker-registry" service="prometheus-prometheus-oper-kubelet" severity="critical" firing 2025-10-04 10:01:08.880689489 +0000 UTC 2.594532216805217
KubePersistentVolumeErrors (0 active)
alert: KubePersistentVolumeErrors
expr: kube_persistentvolume_status_phase{job="kube-state-metrics",phase=~"Failed|Pending"}
  > 0
for: 5m
labels:
  severity: critical
annotations:
  message: The persistent volume {{ $labels.persistentvolume }} has status {{ $labels.phase
    }}.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepersistentvolumeerrors
/etc/prometheus/rules/prometheus-prometheus-prometheus-oper-prometheus-rulefiles-0/monitoring-prometheus-prometheus-oper-kubernetes-system.yaml > kubernetes-system
KubeAPIErrorsHigh (0 active)
alert: KubeAPIErrorsHigh
expr: sum(rate(apiserver_request_total{code=~"^(?:5..)$",job="apiserver"}[5m]))
  / sum(rate(apiserver_request_total{job="apiserver"}[5m])) * 100 > 3
for: 10m
labels:
  severity: critical
annotations:
  message: API server is returning errors for {{ $value }}% of requests.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapierrorshigh
KubeAPIErrorsHigh (0 active)
alert: KubeAPIErrorsHigh
expr: sum
  by(resource, subresource, verb) (rate(apiserver_request_total{code=~"^(?:5..)$",job="apiserver"}[5m]))
  / sum by(resource, subresource, verb) (rate(apiserver_request_total{job="apiserver"}[5m]))
  * 100 > 5
for: 10m
labels:
  severity: warning
annotations:
  message: API server is returning errors for {{ $value }}% of requests for {{ $labels.verb
    }} {{ $labels.resource }} {{ $labels.subresource }}.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapierrorshigh
KubeAPIErrorsHigh (0 active)
alert: KubeAPIErrorsHigh
expr: sum
  by(resource, subresource, verb) (rate(apiserver_request_total{code=~"^(?:5..)$",job="apiserver"}[5m]))
  / sum by(resource, subresource, verb) (rate(apiserver_request_total{job="apiserver"}[5m]))
  * 100 > 10
for: 10m
labels:
  severity: critical
annotations:
  message: API server is returning errors for {{ $value }}% of requests for {{ $labels.verb
    }} {{ $labels.resource }} {{ $labels.subresource }}.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapierrorshigh
KubeAPIErrorsHigh (0 active)
alert: KubeAPIErrorsHigh
expr: sum(rate(apiserver_request_total{code=~"^(?:5..)$",job="apiserver"}[5m]))
  / sum(rate(apiserver_request_total{job="apiserver"}[5m])) * 100 > 1
for: 10m
labels:
  severity: warning
annotations:
  message: API server is returning errors for {{ $value }}% of requests.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapierrorshigh
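The four KubeAPIErrorsHigh variants share one idea, the share of apiserver requests returning 5xx codes, at two scopes: cluster-wide (warning above 1%, critical above 3%) and per resource/subresource/verb (warning above 5%, critical above 10%). The cluster-wide error percentage is the shared expression without its threshold:
  sum(rate(apiserver_request_total{code=~"^(?:5..)$",job="apiserver"}[5m]))
    / sum(rate(apiserver_request_total{job="apiserver"}[5m])) * 100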
KubeAPILatencyHigh (0 active)
alert: KubeAPILatencyHigh
expr: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile{job="apiserver",quantile="0.99",subresource!="log",verb!~"^(?:LIST|WATCH|WATCHLIST|PROXY|CONNECT)$"}
  > 1
for: 10m
labels:
  severity: warning
annotations:
  message: The API server has a 99th percentile latency of {{ $value }} seconds for
    {{ $labels.verb }} {{ $labels.resource }}.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapilatencyhigh
KubeAPILatencyHigh (0 active)
alert: KubeAPILatencyHigh
expr: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile{job="apiserver",quantile="0.99",subresource!="log",verb!~"^(?:LIST|WATCH|WATCHLIST|PROXY|CONNECT)$"}
  > 4
for: 10m
labels:
  severity: critical
annotations:
  message: The API server has a 99th percentile latency of {{ $value }} seconds for
    {{ $labels.verb }} {{ $labels.resource }}.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapilatencyhigh
KubeClientCertificateExpiration (0 active)
alert: KubeClientCertificateExpiration
expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"}
  > 0 and histogram_quantile(0.01, sum by(job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m])))
  < 604800
labels:
  severity: warning
annotations:
  message: A client certificate used to authenticate to the apiserver is expiring
    in less than 7.0 days.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeclientcertificateexpiration
KubeClientCertificateExpiration (0 active)
alert: KubeClientCertificateExpiration
expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"}
  > 0 and histogram_quantile(0.01, sum by(job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m])))
  < 86400
labels:
  severity: critical
annotations:
  message: A client certificate used to authenticate to the apiserver is expiring
    in less than 24.0 hours.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeclientcertificateexpiration
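The thresholds are in seconds: 604800 s is 7 days for the warning and 86400 s is 24 hours for the critical variant. histogram_quantile(0.01, ...) takes the 1st percentile of the remaining-lifetime distribution, so the alert keys on the soonest-expiring client certificates rather than the average. That percentile can be queried directly:
  histogram_quantile(0.01, sum by(job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m])))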
KubeClientErrors (0 active)
alert: KubeClientErrors
expr: sum
  by(instance, job) (rate(ksm_scrape_error_total{job="kube-state-metrics"}[5m]))
  > 0.1
for: 15m
labels:
  severity: warning
annotations:
  message: Kubernetes API server client '{{ $labels.job }}/{{ $labels.instance
    }}' is experiencing {{ printf "%0.0f" $value }} errors / second.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeclienterrors
KubeClientErrors (0 active)
alert: KubeClientErrors
expr: (sum
  by(instance, job) (rate(rest_client_requests_total{code=~"5.."}[5m])) /
  sum by(instance, job) (rate(rest_client_requests_total[5m]))) * 100 > 1
for: 15m
labels:
  severity: warning
annotations:
  message: Kubernetes API server client '{{ $labels.job }}/{{ $labels.instance
    }}' is experiencing {{ printf "%0.0f" $value }}% errors.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeclienterrors
KubeNodeNotReady (0 active)
alert: KubeNodeNotReady
expr: kube_node_status_condition{condition="Ready",job="kube-state-metrics",status="true"}
  == 0
for: 1h
labels:
  severity: warning
annotations:
  message: '{{ $labels.node }} has been unready for more than an hour.'
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubenodenotready
KubeVersionMismatch (0 active)
alert: KubeVersionMismatch
expr: count(count
  by(gitVersion) (label_replace(kubernetes_build_info{job!~"kube-dns|coredns"},
  "gitVersion", "$1", "gitVersion", "(v[0-9]*.[0-9]*.[0-9]*).*")))
  > 1
for: 1h
labels:
  severity: warning
annotations:
  message: There are {{ $value }} different semantic versions of Kubernetes components
    running.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeversionmismatch
KubeletTooManyPods (0 active)
alert: KubeletTooManyPods
expr: kubelet_running_pod_count{job="kubelet"}
  > 110 * 0.9
for: 15m
labels:
  severity: warning
annotations:
  message: Kubelet {{ $labels.instance }} is running {{ $value }} Pods, close to the
    limit of 110.
  runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubelettoomanypods
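The threshold is 110 * 0.9 = 99, i.e. 90% of the default per-node limit of 110 pods. Current pod counts per kubelet are available straight from the metric in the expression:
  kubelet_running_pod_count{job="kubelet"}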
/etc/prometheus/rules/prometheus-prometheus-prometheus-oper-prometheus-rulefiles-0/monitoring-prometheus-prometheus-oper-node-network.yaml > node-network
NetworkReceiveErrors (0 active)
alert: NetworkReceiveErrors
expr: rate(node_network_receive_errs_total{device!~"veth.+",job="node-exporter"}[2m])
  > 0
for: 2m
labels:
  severity: warning
annotations:
  message: Network interface "{{ $labels.device }}" showing receive errors
    on node-exporter {{ $labels.namespace }}/{{ $labels.pod }}.
NetworkTransmitErrors (0 active)
alert: NetworkTransmitErrors
expr: rate(node_network_transmit_errs_total{device!~"veth.+",job="node-exporter"}[2m])
  > 0
for: 2m
labels:
  severity: warning
annotations:
  message: Network interface "{{ $labels.device }}" showing transmit errors
    on node-exporter {{ $labels.namespace }}/{{ $labels.pod }}.
NodeNetworkInterfaceFlapping (0 active)
alert: NodeNetworkInterfaceFlapping
expr: changes(node_network_up{device!~"veth.+",job="node-exporter"}[2m])
  > 2
for: 2m
labels:
  severity: warning
annotations:
  message: Network interface "{{ $labels.device }}" changing it's up status
    often on node-exporter {{ $labels.namespace }}/{{ $labels.pod }}"
/etc/prometheus/rules/prometheus-prometheus-prometheus-oper-prometheus-rulefiles-0/monitoring-prometheus-prometheus-oper-node-time.yaml > node-time
ClockSkewDetected (0 active)
alert: ClockSkewDetected
expr: abs(node_timex_offset_seconds{job="node-exporter"})
  > 0.05
for: 2m
labels:
  severity: warning
annotations:
  message: Clock skew detected on node-exporter {{ $labels.namespace }}/{{ $labels.pod
    }}. Ensure NTP is configured correctly on this host.
/etc/prometheus/rules/prometheus-prometheus-prometheus-oper-prometheus-rulefiles-0/monitoring-prometheus-prometheus-oper-prometheus-operator.yaml > prometheus-operator
PrometheusOperatorNodeLookupErrors (0 active)
alert: PrometheusOperatorNodeLookupErrors
expr: rate(prometheus_operator_node_address_lookup_errors_total{job="prometheus-prometheus-oper-operator",namespace="monitoring"}[5m])
  > 0.1
for: 10m
labels:
  severity: warning
annotations:
  message: Errors while reconciling Prometheus in {{ $labels.namespace }} Namespace.
PrometheusOperatorReconcileErrors (0 active)
alert: PrometheusOperatorReconcileErrors
expr: rate(prometheus_operator_reconcile_errors_total{job="prometheus-prometheus-oper-operator",namespace="monitoring"}[5m])
  > 0.1
for: 10m
labels:
  severity: warning
annotations:
  message: Errors while reconciling {{ $labels.controller }} in {{ $labels.namespace
    }} Namespace.
/etc/prometheus/rules/prometheus-prometheus-prometheus-oper-prometheus-rulefiles-0/monitoring-prometheus-prometheus-oper-prometheus.yaml > prometheus
PrometheusBadConfig (0 active)
alert: PrometheusBadConfig
expr: max_over_time(prometheus_config_last_reload_successful{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[5m])
  == 0
for: 10m
labels:
  severity: critical
annotations:
  description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to reload
    its configuration.
  summary: Failed Prometheus configuration reload.
PrometheusDuplicateTimestamps (0 active)
alert: PrometheusDuplicateTimestamps
expr: rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[5m])
  > 0
for: 10m
labels:
  severity: warning
annotations:
  description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is dropping {{$value
    | humanize}} samples/s with different values but duplicated timestamp.
  summary: Prometheus is dropping samples with duplicate timestamps.
PrometheusErrorSendingAlertsToAnyAlertmanager (0 active)
alert: PrometheusErrorSendingAlertsToAnyAlertmanager
expr: min
  without(alertmanager) (rate(prometheus_notifications_errors_total{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[5m])
  / rate(prometheus_notifications_sent_total{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[5m]))
  * 100 > 3
for: 15m
labels:
  severity: critical
annotations:
  description: '{{ printf "%.1f" $value }}% minimum errors while sending alerts
    from Prometheus {{$labels.namespace}}/{{$labels.pod}} to any Alertmanager.'
  summary: Prometheus encounters more than 3% errors sending alerts to any Alertmanager.
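The min without(alertmanager) aggregation means the error rate must exceed 3% towards every configured Alertmanager before this critical alert fires; if only some Alertmanagers are failing, the warning-level PrometheusErrorSendingAlertsToSomeAlertmanagers below fires instead. The per-Alertmanager error percentage is the inner ratio on its own:
  rate(prometheus_notifications_errors_total{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[5m])
    / rate(prometheus_notifications_sent_total{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[5m]) * 100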
PrometheusErrorSendingAlertsToSomeAlertmanagers (0 active)
alert: PrometheusErrorSendingAlertsToSomeAlertmanagers
expr: (rate(prometheus_notifications_errors_total{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[5m])
  / rate(prometheus_notifications_sent_total{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[5m]))
  * 100 > 1
for: 15m
labels:
  severity: warning
annotations:
  description: '{{ printf "%.1f" $value }}% errors while sending alerts from
    Prometheus {{$labels.namespace}}/{{$labels.pod}} to Alertmanager {{$labels.alertmanager}}.'
  summary: Prometheus has encountered more than 1% errors sending alerts to a specific
    Alertmanager.
PrometheusMissingRuleEvaluations (0 active)
alert: PrometheusMissingRuleEvaluations
expr: increase(prometheus_rule_group_iterations_missed_total{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[5m])
  > 0
for: 15m
labels:
  severity: warning
annotations:
  description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has missed {{ printf
    "%.0f" $value }} rule group evaluations in the last 5m.
  summary: Prometheus is missing rule evaluations due to slow rule group evaluation.
PrometheusNotConnectedToAlertmanagers (0 active)
alert: PrometheusNotConnectedToAlertmanagers
expr: max_over_time(prometheus_notifications_alertmanagers_discovered{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[5m])
  < 1
for: 10m
labels:
  severity: warning
annotations:
  description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is not connected to
    any Alertmanagers.
  summary: Prometheus is not connected to any Alertmanagers.
PrometheusNotIngestingSamples (0 active)
alert: PrometheusNotIngestingSamples
expr: rate(prometheus_tsdb_head_samples_appended_total{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[5m])
  <= 0
for: 10m
labels:
  severity: warning
annotations:
  description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is not ingesting samples.
  summary: Prometheus is not ingesting samples.
PrometheusNotificationQueueRunningFull (0 active)
alert: PrometheusNotificationQueueRunningFull
expr: (predict_linear(prometheus_notifications_queue_length{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[5m],
  60 * 30) > min_over_time(prometheus_notifications_queue_capacity{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[5m]))
for: 15m
labels:
  severity: warning
annotations:
  description: Alert notification queue of Prometheus {{$labels.namespace}}/{{$labels.pod}}
    is running full.
  summary: Prometheus alert notification queue predicted to run full in less than
    30m.
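predict_linear() extrapolates the notification queue length 30 minutes ahead (60 * 30 seconds) and compares it with the configured queue capacity, so the alert warns before the queue actually fills and notifications risk being dropped. Queue length and capacity can be compared directly:
  prometheus_notifications_queue_length{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}
  prometheus_notifications_queue_capacity{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}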
PrometheusOutOfOrderTimestamps (0 active)
alert: PrometheusOutOfOrderTimestamps
expr: rate(prometheus_target_scrapes_sample_out_of_order_total{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[5m])
  > 0
for: 10m
labels:
  severity: warning
annotations:
  description: Prometheus {{$labels.namespace}}/{{$labels.pod}} is dropping {{$value
    | humanize}} samples/s with timestamps arriving out of order.
  summary: Prometheus drops samples with out-of-order timestamps.
PrometheusRemoteStorageFailures (0 active)
PrometheusRemoteWriteBehind (0 active)
alert: PrometheusRemoteWriteBehind
expr: (max_over_time(prometheus_remote_storage_highest_timestamp_in_seconds{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[5m])
  - on(job, instance) group_right() max_over_time(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[5m]))
  > 120
for: 15m
labels:
  severity: critical
annotations:
  description: Prometheus {{$labels.namespace}}/{{$labels.pod}} remote write is {{
    printf "%.1f" $value }}s behind for queue {{$labels.queue}}.
  summary: Prometheus remote write is behind.
PrometheusRuleFailures (0 active)
alert: PrometheusRuleFailures
expr: increase(prometheus_rule_evaluation_failures_total{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[5m])
  > 0
for: 15m
labels:
  severity: critical
annotations:
  description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has failed to evaluate
    {{ printf "%.0f" $value }} rules in the last 5m.
  summary: Prometheus is failing rule evaluations.
PrometheusTSDBCompactionsFailing (0 active)
alert: PrometheusTSDBCompactionsFailing
expr: increase(prometheus_tsdb_compactions_failed_total{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[3h])
  > 0
for: 4h
labels:
  severity: warning
annotations:
  description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has detected {{$value
    | humanize}} compaction failures over the last 3h.
  summary: Prometheus has issues compacting blocks.
PrometheusTSDBReloadsFailing (0 active)
alert: PrometheusTSDBReloadsFailing
expr: increase(prometheus_tsdb_reloads_failures_total{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[3h])
  > 0
for: 4h
labels:
  severity: warning
annotations:
  description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has detected {{$value
    | humanize}} reload failures over the last 3h.
  summary: Prometheus has issues reloading blocks from disk.
PrometheusTSDBWALCorruptions (0 active)
alert: PrometheusTSDBWALCorruptions
expr: increase(tsdb_wal_corruptions_total{job="prometheus-prometheus-oper-prometheus",namespace="monitoring"}[3h])
  > 0
for: 4h
labels:
  severity: warning
annotations:
  description: Prometheus {{$labels.namespace}}/{{$labels.pod}} has detected {{$value
    | humanize}} corruptions of the write-ahead log (WAL) over the last 3h.
  summary: Prometheus is detecting WAL corruptions.