diff --git a/config/config_test.go b/config/config_test.go
index 968b563e1e..0ded7b079e 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -2315,7 +2315,7 @@ var expectedErrors = []struct {
 	},
 	{
 		filename: "kubernetes_selectors_pod.bad.yml",
-		errMsg:   "pod role supports only pod selectors",
+		errMsg:   "pod role supports only pod, node selectors",
 	},
 	{
 		filename: "kubernetes_selectors_service.bad.yml",
diff --git a/config/testdata/kubernetes_selectors_pod.bad.yml b/config/testdata/kubernetes_selectors_pod.bad.yml
index 3a1a83abd2..24285c0ce0 100644
--- a/config/testdata/kubernetes_selectors_pod.bad.yml
+++ b/config/testdata/kubernetes_selectors_pod.bad.yml
@@ -6,3 +6,6 @@ scrape_configs:
     - role: "node"
       label: "foo=bar"
       field: "metadata.status=Running"
+    - role: "service"
+      label: "baz=que"
+      field: "metadata.status=Running"
diff --git a/config/testdata/kubernetes_selectors_pod.good.yml b/config/testdata/kubernetes_selectors_pod.good.yml
index 91da6ada17..49c17d72ce 100644
--- a/config/testdata/kubernetes_selectors_pod.good.yml
+++ b/config/testdata/kubernetes_selectors_pod.good.yml
@@ -11,3 +11,8 @@ scrape_configs:
     - role: "pod"
       label: "foo in (bar,baz)"
       field: "metadata.status=Running"
+  - role: pod
+    selectors:
+    - role: "node"
+      label: "foo=bar"
+      field: "metadata.status=Running"
diff --git a/discovery/kubernetes/kubernetes.go b/discovery/kubernetes/kubernetes.go
index 678f287ef5..6bbbafe8ea 100644
--- a/discovery/kubernetes/kubernetes.go
+++ b/discovery/kubernetes/kubernetes.go
@@ -194,7 +194,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(any) error) error {
 	foundSelectorRoles := make(map[Role]struct{})
 	allowedSelectors := map[Role][]string{
-		RolePod:           {string(RolePod)},
+		RolePod:           {string(RolePod), string(RoleNode)},
 		RoleService:       {string(RoleService)},
 		RoleEndpointSlice: {string(RolePod), string(RoleService), string(RoleEndpointSlice)},
 		RoleEndpoint:      {string(RolePod), string(RoleService), string(RoleEndpoint)},
diff --git a/discovery/kubernetes/pod.go b/discovery/kubernetes/pod.go
index 1fed78b3a7..05b778bb59 100644
--- a/discovery/kubernetes/pod.go
+++ b/discovery/kubernetes/pod.go
@@ -286,6 +286,19 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group {
 		return tg
 	}
 
+	// Skip pods scheduled on nodes that are absent from the node store,
+	// i.e. nodes that were excluded by the node selectors.
+	if p.withNodeMetadata {
+		_, exists, err := p.nodeInf.GetStore().GetByKey(pod.Spec.NodeName)
+		if err != nil {
+			p.logger.Error("failed to get node from store", "node", pod.Spec.NodeName, "err", err)
+			return tg
+		}
+		if !exists {
+			return tg
+		}
+	}
+
 	tg.Labels = podLabels(pod)
 	tg.Labels[namespaceLabel] = lv(pod.Namespace)
 	if p.withNodeMetadata {
diff --git a/discovery/kubernetes/pod_test.go b/discovery/kubernetes/pod_test.go
index db5db546d0..df8f42fcd3 100644
--- a/discovery/kubernetes/pod_test.go
+++ b/discovery/kubernetes/pod_test.go
@@ -627,3 +627,74 @@ func TestPodDiscoveryWithUpdatedNamespaceMetadata(t *testing.T) {
 		},
 	}.Run(t)
 }
+
+func TestPodDiscoveryWithNodeSelector(t *testing.T) {
+	t.Parallel()
+
+	workerNode := makeNode("worker-node", "10.0.0.1", "", map[string]string{"node-type": "worker"}, nil)
+	filteredNode := makeNode("filtered-node", "10.0.0.2", "", map[string]string{"node-type": "master"}, nil)
+
+	attachMetadata := AttachMetadataConfig{
+		Node: true, // Necessary for node role selectors to work with the pod role.
+	}
+	n, c := makeDiscoveryWithMetadata(RolePod, NamespaceDiscovery{}, attachMetadata, workerNode, filteredNode)
+	n.selectors = roleSelector{
+		node: resourceSelector{
+			label: "node-type=worker",
+		},
+	}
+
+	podOnWorker := makePods("default")
+	podOnWorker.Name = "pod-on-worker"
+	podOnWorker.UID = types.UID("worker-pod-123")
+	podOnWorker.Spec.NodeName = "worker-node"
+	podOnWorker.Status.PodIP = "192.168.1.1"
+
+	podOnFilteredNode := makePods("default")
+	podOnFilteredNode.Name = "pod-on-filtered-node"
+	podOnFilteredNode.UID = types.UID("filtered-pod-456")
+	podOnFilteredNode.Spec.NodeName = "filtered-node"
+	podOnFilteredNode.Status.PodIP = "192.168.1.2"
+
+	k8sDiscoveryTest{
+		discovery: n,
+		beforeRun: func() {
+			c.CoreV1().Pods("default").Create(context.Background(), podOnWorker, metav1.CreateOptions{})
+			c.CoreV1().Pods("default").Create(context.Background(), podOnFilteredNode, metav1.CreateOptions{})
+		},
+		expectedMaxItems: 2,
+		expectedRes: map[string]*targetgroup.Group{
+			"pod/default/pod-on-worker": {
+				Targets: []model.LabelSet{
+					{
+						"__address__":                                   "192.168.1.1:9000",
+						"__meta_kubernetes_pod_container_image":         "testcontainer:latest",
+						"__meta_kubernetes_pod_container_name":          "testcontainer",
+						"__meta_kubernetes_pod_container_port_name":     "testport",
+						"__meta_kubernetes_pod_container_port_number":   "9000",
+						"__meta_kubernetes_pod_container_port_protocol": "TCP",
+						"__meta_kubernetes_pod_container_init":          "false",
+						"__meta_kubernetes_pod_container_id":            "docker://a1b2c3d4e5f6",
+					},
+				},
+				Labels: model.LabelSet{
+					"__meta_kubernetes_namespace":                   "default",
+					"__meta_kubernetes_pod_name":                    "pod-on-worker",
+					"__meta_kubernetes_pod_ip":                      "192.168.1.1",
+					"__meta_kubernetes_pod_ready":                   "true",
+					"__meta_kubernetes_pod_phase":                   "Running",
+					"__meta_kubernetes_pod_node_name":               "worker-node",
+					"__meta_kubernetes_pod_host_ip":                 "2.3.4.5",
+					"__meta_kubernetes_pod_uid":                     "worker-pod-123",
+					"__meta_kubernetes_node_name":                   "worker-node",
+					"__meta_kubernetes_node_label_node_type":        "worker",
+					"__meta_kubernetes_node_labelpresent_node_type": "true",
+				},
+				Source: "pod/default/pod-on-worker",
+			},
+			"pod/default/pod-on-filtered-node": {
+				Source: "pod/default/pod-on-filtered-node",
+			},
+		},
+	}.Run(t)
+}
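For reference, a minimal sketch of the scrape configuration this change enables, modeled on the kubernetes_selectors_pod.good.yml testdata above. The job name and the node-type=worker label are illustrative (the label comes from the new test, not from the testdata), and attach_metadata.node must be enabled for the selector to actually filter pods, mirroring the p.withNodeMetadata guard in buildPod:

```yaml
scrape_configs:
- job_name: kubernetes-pods   # illustrative job name
  kubernetes_sd_configs:
  - role: pod
    # attach_metadata.node starts the node informer; without it the
    # node selector below has no effect on pod filtering.
    attach_metadata:
      node: true
    selectors:
    - role: "node"
      label: "node-type=worker"   # illustrative label, as in the new test
```

As TestPodDiscoveryWithNodeSelector asserts for pod-on-filtered-node, pods scheduled on nodes excluded by the selector still yield a target group with only Source set and no Targets, so previously discovered targets are dropped rather than left stale.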