/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package endpoint

import (
	"context"
	"fmt"
	"math"
	"sync"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/conversion"
	"k8s.io/apimachinery/pkg/labels"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	coreinformers "k8s.io/client-go/informers/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/endpointslice"
	endpointsliceutil "k8s.io/endpointslice/util"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/api/v1/endpoints"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	api "k8s.io/kubernetes/pkg/apis/core"
	helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
	"k8s.io/kubernetes/pkg/controller"
	utillabels "k8s.io/kubernetes/pkg/util/labels"
	utilnet "k8s.io/utils/net"
)

const (
	// maxRetries is the number of times a service will be retried before it is dropped out of the queue.
	// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the
	// sequence of delays between successive queuings of a service.
	//
	// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
	maxRetries = 15

	// maxCapacity represents the maximum number of addresses that should be
	// stored in an Endpoints resource. Endpoints exceeding this capacity are
	// truncated by this controller.
	maxCapacity = 1000

	// truncated is a possible value for the `endpoints.kubernetes.io/over-capacity` annotation on an
	// Endpoints resource and indicates that the number of endpoints has been truncated to
	// maxCapacity.
	truncated = "truncated"

	// LabelManagedBy is a label for recognizing Endpoints managed by this controller.
	LabelManagedBy = "endpoints.kubernetes.io/managed-by"

	// ControllerName is the name of this controller.
	ControllerName = "endpoint-controller"
)

// NewEndpointController returns a new *Controller.
func NewEndpointController(ctx context.Context, podInformer coreinformers.PodInformer, serviceInformer coreinformers.ServiceInformer,
	endpointsInformer coreinformers.EndpointsInformer, client clientset.Interface, endpointUpdatesBatchPeriod time.Duration) *Controller {
	broadcaster := record.NewBroadcaster(record.WithContext(ctx))
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: ControllerName})

	e := &Controller{
		client: client,
		queue: workqueue.NewTypedRateLimitingQueueWithConfig(
			workqueue.DefaultTypedControllerRateLimiter[string](),
			workqueue.TypedRateLimitingQueueConfig[string]{
				Name: "endpoint",
			},
		),
		podQueue:         workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[*endpointsliceutil.PodProjectionKey]()),
		workerLoopPeriod: time.Second,
	}

	serviceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: e.onServiceUpdate,
		UpdateFunc: func(old, cur interface{}) {
			e.onServiceUpdate(cur)
		},
		DeleteFunc: e.onServiceDelete,
	})
	e.serviceLister = serviceInformer.Lister()
	e.servicesSynced = serviceInformer.Informer().HasSynced

	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { e.onPodUpdate(nil, obj) },
		UpdateFunc: e.onPodUpdate,
		DeleteFunc: func(obj interface{}) { e.onPodUpdate(obj, nil) },
	})
	e.podLister = podInformer.Lister()
	e.podsSynced = podInformer.Informer().HasSynced

	endpointsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		DeleteFunc: e.onEndpointsDelete,
	})
	e.endpointsLister = endpointsInformer.Lister()
	e.endpointsSynced = endpointsInformer.Informer().HasSynced

	e.staleEndpointsTracker = newStaleEndpointsTracker()
	e.triggerTimeTracker = endpointsliceutil.NewTriggerTimeTracker()
	e.eventBroadcaster = broadcaster
	e.eventRecorder = recorder

	e.endpointUpdatesBatchPeriod = endpointUpdatesBatchPeriod

	return e
}

// Controller manages selector-based service endpoints.
type Controller struct {
	client           clientset.Interface
	eventBroadcaster record.EventBroadcaster
	eventRecorder    record.EventRecorder

	// serviceLister is able to list/get services and is populated by the shared informer passed to
	// NewEndpointController.
	serviceLister corelisters.ServiceLister
	// servicesSynced returns true if the service shared informer has been synced at least once.
	// Added as a member to the struct to allow injection for testing.
	servicesSynced cache.InformerSynced

	// podLister is able to list/get pods and is populated by the shared informer passed to
	// NewEndpointController.
	podLister corelisters.PodLister
	// podsSynced returns true if the pod shared informer has been synced at least once.
	// Added as a member to the struct to allow injection for testing.
	podsSynced cache.InformerSynced

	// endpointsLister is able to list/get endpoints and is populated by the shared informer passed to
	// NewEndpointController.
	endpointsLister corelisters.EndpointsLister
	// endpointsSynced returns true if the endpoints shared informer has been synced at least once.
	// Added as a member to the struct to allow injection for testing.
	endpointsSynced cache.InformerSynced
	// staleEndpointsTracker can help determine if a cached Endpoints is out of date.
	staleEndpointsTracker *staleEndpointsTracker

	// Services that need to be updated. A channel is inappropriate here,
	// because it allows services with lots of pods to be serviced much
	// more often than services with few pods; it also would cause a
	// service that's inserted multiple times to be processed more than
	// necessary.
	queue workqueue.TypedRateLimitingInterface[string]

	// podQueue is used to compute the pod->services mapping and drive matching services into the
	// service queue. This operation can be expensive when a large number of services exist in the
	// pod's namespace and the label selection logic has to be evaluated against each service.
	podQueue workqueue.TypedRateLimitingInterface[*endpointsliceutil.PodProjectionKey]

	// workerLoopPeriod is the time between worker runs. The workers process the queue of service and pod changes.
	workerLoopPeriod time.Duration

	// triggerTimeTracker is a util used to compute and export the EndpointsLastChangeTriggerTime
	// annotation.
	triggerTimeTracker *endpointsliceutil.TriggerTimeTracker

	endpointUpdatesBatchPeriod time.Duration
}

// Run will not return until ctx is cancelled. workers determines how many
// endpoints will be handled in parallel.
func (e *Controller) Run(ctx context.Context, workers int) {
	defer utilruntime.HandleCrash()

	// Start events processing pipeline.
	e.eventBroadcaster.StartStructuredLogging(3)
	e.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: e.client.CoreV1().Events("")})
	defer e.eventBroadcaster.Shutdown()

	logger := klog.FromContext(ctx)
	logger.Info("Starting endpoint controller")
	var wg sync.WaitGroup
	defer func() {
		logger.Info("Shutting down endpoint controller")
		e.queue.ShutDown()
		e.podQueue.ShutDown()
		wg.Wait()
	}()

	if !cache.WaitForNamedCacheSyncWithContext(ctx, e.podsSynced, e.servicesSynced, e.endpointsSynced) {
		return
	}

	for i := 0; i < workers; i++ {
		wg.Go(func() {
			wait.UntilWithContext(ctx, e.worker, e.workerLoopPeriod)
		})
		wg.Go(func() {
			wait.UntilWithContext(ctx, e.podWorker, e.workerLoopPeriod)
		})
	}

	wg.Go(func() {
		e.checkLeftoverEndpoints()
	})

	<-ctx.Done()
}
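
// podToEndpointAddressForService returns an EndpointAddress for the given pod,
// picking the pod IP that matches the service's primary IP family.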
func podToEndpointAddressForService(svc *v1.Service, pod *v1.Pod) (*v1.EndpointAddress, error) {
	var endpointIP string
	wantIPv6 := svc.Spec.IPFamilies[0] == v1.IPv6Protocol

	// Find an IP that matches the family. We parse and restringify the IP in case the
	// value on the Pod is in an irregular format.
	for _, podIP := range pod.Status.PodIPs {
		ip := utilnet.ParseIPSloppy(podIP.IP)
		if wantIPv6 == utilnet.IsIPv6(ip) {
			endpointIP = ip.String()
			break
		}
	}

	if endpointIP == "" {
		return nil, fmt.Errorf("failed to find a matching endpoint for service %v", svc.Name)
	}

	return &v1.EndpointAddress{
		IP:       endpointIP,
		NodeName: &pod.Spec.NodeName,
		TargetRef: &v1.ObjectReference{
			Kind:      "Pod",
			Namespace: pod.ObjectMeta.Namespace,
			Name:      pod.ObjectMeta.Name,
			UID:       pod.ObjectMeta.UID,
		},
	}, nil
}

// onServiceUpdate queues the Service for processing.
func (e *Controller) onServiceUpdate(obj interface{}) {
	key, err := controller.KeyFunc(obj)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
		return
	}
	e.queue.Add(key)
}

// onServiceDelete queues the deleted Service for processing.
func (e *Controller) onServiceDelete(obj interface{}) {
	key, err := controller.KeyFunc(obj)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
		return
	}
	e.queue.Add(key)
}

// onPodUpdate enqueues the pod's projection key on Add/Update/Delete events, to find matching services later.
func (e *Controller) onPodUpdate(old, cur interface{}) {
	key := endpointsliceutil.GetPodUpdateProjectionKey(old, cur)
	if key != nil {
		e.podQueue.Add(key)
	}
}
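
// onEndpointsDelete queues the Service matching the deleted Endpoints for processing.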
func (e *Controller) onEndpointsDelete(obj interface{}) {
	key, err := controller.KeyFunc(obj)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
		return
	}
	e.queue.Add(key)
}

// worker runs a worker thread that just dequeues items, processes them, and
// marks them done. You may run as many of these in parallel as you wish; the
// workqueue guarantees that they will not end up processing the same service
// at the same time.
func (e *Controller) worker(ctx context.Context) {
	for e.processNextWorkItem(ctx) {
	}
}
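
// processNextWorkItem dequeues a single service key, syncs it, and reports the
// result to the rate limiter. It returns false when the queue has been shut down.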
func (e *Controller) processNextWorkItem(ctx context.Context) bool {
	eKey, quit := e.queue.Get()
	if quit {
		return false
	}
	defer e.queue.Done(eKey)

	logger := klog.FromContext(ctx)
	err := e.syncService(ctx, eKey)
	e.handleErr(logger, err, eKey)

	return true
}
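
// handleErr requeues the service key with rate limiting on error, giving up and
// dropping it from the queue after maxRetries attempts.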
func (e *Controller) handleErr(logger klog.Logger, err error, key string) {
	if err == nil {
		e.queue.Forget(key)
		return
	}

	ns, name, keyErr := cache.SplitMetaNamespaceKey(key)
	if keyErr != nil {
		logger.Error(err, "Failed to split meta namespace cache key", "key", key)
	}

	if e.queue.NumRequeues(key) < maxRetries {
		logger.V(2).Info("Error syncing endpoints, retrying", "service", klog.KRef(ns, name), "err", err)
		e.queue.AddRateLimited(key)
		return
	}

	logger.Info("Dropping service out of the queue", "service", klog.KRef(ns, name), "err", err)
	e.queue.Forget(key)
	utilruntime.HandleError(err)
}
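
// syncService computes the desired Endpoints for the service identified by key and
// creates, updates, or deletes the Endpoints object to match.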
func (e *Controller) syncService(ctx context.Context, key string) error {
	startTime := time.Now()
	logger := klog.FromContext(ctx)
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return err
	}
	defer func() {
		logger.V(4).Info("Finished syncing service endpoints", "service", klog.KRef(namespace, name), "elapsedTime", time.Since(startTime))
	}()

	service, err := e.serviceLister.Services(namespace).Get(name)
	if err != nil {
		if !errors.IsNotFound(err) {
			return err
		}

		// Delete the corresponding endpoint, as the service has been deleted.
		// TODO: Please note that this will delete an endpoint when a
		// service is deleted. However, if we're down at the time when
		// the service is deleted, we will miss that deletion, so this
		// doesn't completely solve the problem. See #6877.
		err = e.client.CoreV1().Endpoints(namespace).Delete(ctx, name, metav1.DeleteOptions{})
		if err != nil && !errors.IsNotFound(err) {
			return err
		}
		e.triggerTimeTracker.DeleteService(namespace, name)
		e.staleEndpointsTracker.Delete(namespace, name)
		return nil
	}

	if service.Spec.Type == v1.ServiceTypeExternalName {
		// services with Type ExternalName receive no endpoints from this controller;
		// Ref: https://issues.k8s.io/105986
		return nil
	}

	if service.Spec.Selector == nil {
		// services without a selector receive no endpoints from this controller;
		// these services will receive the endpoints that are created out-of-band via the REST API.
		return nil
	}

	logger.V(5).Info("About to update endpoints for service", "service", klog.KRef(namespace, name))
	pods, err := e.podLister.Pods(service.Namespace).List(labels.Set(service.Spec.Selector).AsSelectorPreValidated())
	if err != nil {
		// Since we're getting stuff from a local cache, it is
		// basically impossible to get this error.
		return err
	}

	// We call ComputeEndpointLastChangeTriggerTime here to make sure that the
	// state of the trigger time tracker gets updated even if the sync turns out
	// to be a no-op and we don't update the endpoints object.
	endpointsLastChangeTriggerTime := e.triggerTimeTracker.
		ComputeEndpointLastChangeTriggerTime(namespace, service, pods)

	subsets := []v1.EndpointSubset{}
	var totalReadyEps int
	var totalNotReadyEps int

	for _, pod := range pods {
		if !endpointsliceutil.ShouldPodBeInEndpoints(pod, service.Spec.PublishNotReadyAddresses) {
			logger.V(5).Info("Pod is not included on endpoints for Service", "pod", klog.KObj(pod), "service", klog.KObj(service))
			continue
		}

		ep, err := podToEndpointAddressForService(service, pod)
		if err != nil {
			// This will happen if the cluster runs with some nodes configured as dual stack
			// and some not, such as during an upgrade.
			logger.V(2).Info("Failed to find endpoint for service with ClusterIP on pod", "service", klog.KObj(service), "clusterIP", service.Spec.ClusterIP, "pod", klog.KObj(pod), "error", err)
			continue
		}

		epa := *ep
		if endpointsliceutil.ShouldSetHostname(pod, service) {
			epa.Hostname = pod.Spec.Hostname
		}

		// Allow headless service not to have ports.
		if len(service.Spec.Ports) == 0 {
			if service.Spec.ClusterIP == api.ClusterIPNone {
				subsets, totalReadyEps, totalNotReadyEps = addEndpointSubset(logger, subsets, pod, epa, nil, service.Spec.PublishNotReadyAddresses)
				// No need to repack subsets for headless service without ports.
			}
		} else {
			for i := range service.Spec.Ports {
				servicePort := &service.Spec.Ports[i]
				portNum, err := endpointslice.FindPort(pod, servicePort)
				if err != nil {
					logger.V(4).Info("Failed to find port for service", "service", klog.KObj(service), "error", err)
					continue
				}
				epp := endpointPortFromServicePort(servicePort, portNum)

				var readyEps, notReadyEps int
				subsets, readyEps, notReadyEps = addEndpointSubset(logger, subsets, pod, epa, epp, service.Spec.PublishNotReadyAddresses)
				totalReadyEps = totalReadyEps + readyEps
				totalNotReadyEps = totalNotReadyEps + notReadyEps
			}
		}
	}
	subsets = endpoints.RepackSubsets(subsets)

	// See if there's actually an update here.
	currentEndpoints, err := e.endpointsLister.Endpoints(service.Namespace).Get(service.Name)
	if err != nil {
		if !errors.IsNotFound(err) {
			return err
		}
		currentEndpoints = &v1.Endpoints{
			ObjectMeta: metav1.ObjectMeta{
				Name:   service.Name,
				Labels: service.Labels,
			},
		}
	} else if e.staleEndpointsTracker.IsStale(currentEndpoints) {
		return fmt.Errorf("endpoints informer cache is out of date, resource version %s already processed for endpoints %s", currentEndpoints.ResourceVersion, key)
	}

	createEndpoints := len(currentEndpoints.ResourceVersion) == 0

	// Compare the sorted subsets and labels.
	// When comparing the subsets, we ignore the difference in ResourceVersion of Pod to avoid
	// unnecessary Endpoints updates caused by Pod updates that we don't care about, e.g. annotation updates.
	if !createEndpoints &&
		(endpointSubsetsEqualIgnoreResourceVersion(currentEndpoints.Subsets, subsets) ||
			// If the comparison fails, try again after repacking; it may be a difference in the hash algorithm.
			// For more context: https://github.com/kubernetes/kubernetes/issues/129652#issuecomment-3264035333
			endpointSubsetsEqualIgnoreResourceVersion(endpoints.RepackSubsets(currentEndpoints.Subsets), subsets)) &&
		labelsCorrectForEndpoints(currentEndpoints.Labels, service.Labels) &&
		capacityAnnotationSetCorrectly(currentEndpoints.Annotations, currentEndpoints.Subsets) {
		logger.V(5).Info("endpoints are equal, skipping update", "service", klog.KObj(service))
		return nil
	}
	newEndpoints := currentEndpoints.DeepCopy()
	newEndpoints.Subsets = subsets
	newEndpoints.Labels = service.Labels
	if newEndpoints.Annotations == nil {
		newEndpoints.Annotations = make(map[string]string)
	}

	if !endpointsLastChangeTriggerTime.IsZero() {
		newEndpoints.Annotations[v1.EndpointsLastChangeTriggerTime] =
			endpointsLastChangeTriggerTime.UTC().Format(time.RFC3339Nano)
	} else { // No new trigger time, clear the annotation.
		delete(newEndpoints.Annotations, v1.EndpointsLastChangeTriggerTime)
	}

	if truncateEndpoints(newEndpoints) {
		newEndpoints.Annotations[v1.EndpointsOverCapacity] = truncated
	} else {
		delete(newEndpoints.Annotations, v1.EndpointsOverCapacity)
	}

	if newEndpoints.Labels == nil {
		newEndpoints.Labels = make(map[string]string)
	}
	if !helper.IsServiceIPSet(service) {
		newEndpoints.Labels = utillabels.CloneAndAddLabel(newEndpoints.Labels, v1.IsHeadlessService, "")
	} else {
		newEndpoints.Labels = utillabels.CloneAndRemoveLabel(newEndpoints.Labels, v1.IsHeadlessService)
	}
	newEndpoints.Labels[LabelManagedBy] = ControllerName

	logger.V(4).Info("Update endpoints", "service", klog.KObj(service), "readyEndpoints", totalReadyEps, "notReadyEndpoints", totalNotReadyEps)
	var updatedEndpoints *v1.Endpoints
	if createEndpoints {
		// No previous endpoints, create them
		_, err = e.client.CoreV1().Endpoints(service.Namespace).Create(ctx, newEndpoints, metav1.CreateOptions{})
	} else {
		// Pre-existing
		updatedEndpoints, err = e.client.CoreV1().Endpoints(service.Namespace).Update(ctx, newEndpoints, metav1.UpdateOptions{})
	}
	if err != nil {
		if createEndpoints && errors.IsForbidden(err) {
			// A request is forbidden primarily for two reasons:
			// 1. namespace is terminating, endpoint creation is not allowed by default.
			// 2. policy is misconfigured, in which case no service would function anywhere.
			// Given the frequency of 1, we log at a lower level.
			logger.V(5).Info("Forbidden from creating endpoints", "error", err)

			// If the namespace is terminating, creates will continue to fail. Simply drop the item.
			if errors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
				return nil
			}
		}

		if createEndpoints {
			e.eventRecorder.Eventf(newEndpoints, v1.EventTypeWarning, "FailedToCreateEndpoint", "Failed to create endpoint for service %v/%v: %v", service.Namespace, service.Name, err)
		} else {
			e.eventRecorder.Eventf(newEndpoints, v1.EventTypeWarning, "FailedToUpdateEndpoint", "Failed to update endpoint %v/%v: %v", service.Namespace, service.Name, err)
		}

		return err
	}

	// If the current Endpoints object was updated, we track the old resource version, so that
	// if we obtain that resource version again from the lister we know it is outdated and we
	// need to retry later to wait for the informer cache to be up-to-date.
	// Some operations (webhooks, truncated endpoints, ...) can cause an endpoints update to
	// become a no-op and return the same resourceVersion.
	// Ref: https://issues.k8s.io/127370 , https://issues.k8s.io/126578
	if updatedEndpoints != nil && updatedEndpoints.ResourceVersion != currentEndpoints.ResourceVersion {
		e.staleEndpointsTracker.Stale(currentEndpoints)
	}
	return nil
}
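
// podWorker runs a worker thread that dequeues pod projection keys and resolves
// them to matching services. You may run as many of these in parallel as you wish.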
func (e *Controller) podWorker(ctx context.Context) {
	for e.processNextPodWorkItem(ctx) {
	}
}
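
// processNextPodWorkItem dequeues a single pod projection key, syncs it, and reports
// the result to the rate limiter. It returns false when the pod queue has been shut down.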
func (e *Controller) processNextPodWorkItem(ctx context.Context) bool {
	eKey, quit := e.podQueue.Get()
	if quit {
		return false
	}
	defer e.podQueue.Done(eKey)

	logger := klog.FromContext(ctx)
	err := e.syncPod(logger, eKey)
	e.handlePodErr(logger, err, eKey)

	return true
}
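
// handlePodErr requeues the pod projection key with rate limiting on error, giving up
// and dropping it from the pod queue after maxRetries attempts.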
func (e *Controller) handlePodErr(logger klog.Logger, err error, key *endpointsliceutil.PodProjectionKey) {
	if err == nil {
		e.podQueue.Forget(key)
		return
	}

	if e.podQueue.NumRequeues(key) < maxRetries {
		logger.V(2).Info("Error syncing pod, retrying", "PodProjectionKey", *key, "err", err)
		e.podQueue.AddRateLimited(key)
		return
	}

	logger.Info("Dropping pod out of the queue", "PodProjectionKey", *key, "err", err)
	e.podQueue.Forget(key)
	utilruntime.HandleError(err)
}
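
// syncPod maps the pod projection key to the services that select the pod and enqueues
// those services, batching updates by endpointUpdatesBatchPeriod.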
func (e *Controller) syncPod(logger klog.Logger, key *endpointsliceutil.PodProjectionKey) error {
	startTime := time.Now()
	defer func() {
		logger.V(4).Info("Finished syncing pod", "PodProjectionKey", *key, "elapsedTime", time.Since(startTime))
	}()

	servicesToUpdate, err := endpointsliceutil.GetServicesToUpdate(e.serviceLister, key)
	if err != nil {
		return err
	}
	for service := range servicesToUpdate {
		e.queue.AddAfter(service, e.endpointUpdatesBatchPeriod)
	}
	return nil
}

// checkLeftoverEndpoints lists all currently existing endpoints and adds their
// service to the queue. This will detect endpoints that exist with no
// corresponding service; these endpoints need to be deleted. We only need to
// do this once on startup, because in steady-state these are detected (but
// some stragglers could have been left behind if the endpoint controller
// reboots).
func (e *Controller) checkLeftoverEndpoints() {
	list, err := e.endpointsLister.List(labels.Everything())
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("Unable to list endpoints (%v); orphaned endpoints will not be cleaned up. (They're pretty harmless, but you can restart this component if you want another attempt made.)", err))
		return
	}
	for _, ep := range list {
		if _, ok := ep.Annotations[resourcelock.LeaderElectionRecordAnnotationKey]; ok {
			// When there are multiple controller-manager instances, we observe that this
			// would delete leader-election endpoints after 5min and cause re-election,
			// so skip the delete here, as leader-election endpoints have no
			// corresponding service.
			continue
		}
		key, err := controller.KeyFunc(ep)
		if err != nil {
			utilruntime.HandleError(fmt.Errorf("Unable to get key for endpoint %#v", ep))
			continue
		}
		e.queue.Add(key)
	}
}

// addEndpointSubset adds the endpoint addresses and ports to the EndpointSubset.
// The addresses are added to the corresponding field, ready or not ready, depending
// on the pod status and the Service PublishNotReadyAddresses field value.
// The pod passed to this function must have already been filtered through ShouldPodBeInEndpoints.
func addEndpointSubset(logger klog.Logger, subsets []v1.EndpointSubset, pod *v1.Pod, epa v1.EndpointAddress,
	epp *v1.EndpointPort, tolerateUnreadyEndpoints bool) ([]v1.EndpointSubset, int, int) {
	var readyEps int
	var notReadyEps int
	ports := []v1.EndpointPort{}
	if epp != nil {
		ports = append(ports, *epp)
	}
	if tolerateUnreadyEndpoints || podutil.IsPodReady(pod) {
		subsets = append(subsets, v1.EndpointSubset{
			Addresses: []v1.EndpointAddress{epa},
			Ports:     ports,
		})
		readyEps++
	} else { // if it is not a ready address it has to be not ready
		logger.V(5).Info("Pod is out of service", "pod", klog.KObj(pod))
		subsets = append(subsets, v1.EndpointSubset{
			NotReadyAddresses: []v1.EndpointAddress{epa},
			Ports:             ports,
		})
		notReadyEps++
	}
	return subsets, readyEps, notReadyEps
}
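
// endpointPortFromServicePort builds an EndpointPort from the given ServicePort and
// the resolved target port number.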
func endpointPortFromServicePort(servicePort *v1.ServicePort, portNum int) *v1.EndpointPort {
	return &v1.EndpointPort{
		Name:        servicePort.Name,
		Port:        int32(portNum),
		Protocol:    servicePort.Protocol,
		AppProtocol: servicePort.AppProtocol,
	}
}

// capacityAnnotationSetCorrectly returns true if the endpoints are at or under
// maxCapacity and the over-capacity annotation is not set. It returns false if
// the endpoints exceed maxCapacity (and therefore must be truncated) or if the
// annotation is set when it should not be.
func capacityAnnotationSetCorrectly(annotations map[string]string, subsets []v1.EndpointSubset) bool {
	numEndpoints := 0
	for _, subset := range subsets {
		numEndpoints += len(subset.Addresses) + len(subset.NotReadyAddresses)
	}
	if numEndpoints > maxCapacity {
		// If subsets are over capacity, they must be truncated, so consider
		// the annotation as not set correctly.
		return false
	}
	_, ok := annotations[v1.EndpointsOverCapacity]
	return !ok
}

// truncateEndpoints by best effort will distribute the endpoints over the subsets based on the proportion
// of endpoints per subset and will prioritize Ready Endpoints over NotReady Endpoints.
func truncateEndpoints(endpoints *v1.Endpoints) bool {
	totalReady := 0
	totalNotReady := 0
	for _, subset := range endpoints.Subsets {
		totalReady += len(subset.Addresses)
		totalNotReady += len(subset.NotReadyAddresses)
	}

	if totalReady+totalNotReady <= maxCapacity {
		return false
	}

	truncateReady := false
	max := maxCapacity - totalReady
	numTotal := totalNotReady
	if totalReady > maxCapacity {
		truncateReady = true
		max = maxCapacity
		numTotal = totalReady
	}
	canBeAdded := max

	for i := range endpoints.Subsets {
		subset := endpoints.Subsets[i]
		numInSubset := len(subset.Addresses)
		if !truncateReady {
			numInSubset = len(subset.NotReadyAddresses)
		}

		// The number of endpoints per subset will be based on the proportion of endpoints
		// in this subset versus the total number of endpoints. The proportion of endpoints
		// will be rounded up, which most likely will lead to the last subset having fewer
		// endpoints than the expected proportion.
		toBeAdded := int(math.Ceil((float64(numInSubset) / float64(numTotal)) * float64(max)))
		// If there are not enough endpoints for the last subset, ensure only the number up
		// to the capacity are added.
		if toBeAdded > canBeAdded {
			toBeAdded = canBeAdded
		}

		if truncateReady {
			// Truncate ready Addresses to the allocated proportion and truncate all
			// not ready addresses.
			subset.Addresses = addressSubset(subset.Addresses, toBeAdded)
			subset.NotReadyAddresses = []v1.EndpointAddress{}
			canBeAdded -= len(subset.Addresses)
		} else {
			// Only truncate the not ready addresses.
			subset.NotReadyAddresses = addressSubset(subset.NotReadyAddresses, toBeAdded)
			canBeAdded -= len(subset.NotReadyAddresses)
		}
		endpoints.Subsets[i] = subset
	}
	return true
}

// addressSubset takes a list of addresses and returns the first maxNum of them if the
// list is longer than maxNum; otherwise the entire list is returned.
func addressSubset(addresses []v1.EndpointAddress, maxNum int) []v1.EndpointAddress {
	if len(addresses) <= maxNum {
		return addresses
	}
	return addresses[0:maxNum]
}

// semanticIgnoreResourceVersion does semantic deep equality checks for objects
// but excludes the ResourceVersion of ObjectReference. It is used when comparing
// endpoints in Endpoints and EndpointSlice objects to avoid unnecessary updates
// caused by Pod resourceVersion changes.
var semanticIgnoreResourceVersion = conversion.EqualitiesOrDie(
	func(a, b v1.ObjectReference) bool {
		a.ResourceVersion = ""
		b.ResourceVersion = ""
		return a == b
	},
)

// endpointSubsetsEqualIgnoreResourceVersion returns true if the EndpointSubsets
// have equal attributes, excluding the ResourceVersion of the Pod references.
func endpointSubsetsEqualIgnoreResourceVersion(subsets1, subsets2 []v1.EndpointSubset) bool {
	return semanticIgnoreResourceVersion.DeepEqual(subsets1, subsets2)
}

// labelsCorrectForEndpoints tests that epLabels is correctly derived from svcLabels
// (ignoring the v1.IsHeadlessService and LabelManagedBy labels).
func labelsCorrectForEndpoints(epLabels, svcLabels map[string]string) bool {
	if epLabels[LabelManagedBy] != ControllerName {
		return false
	}

	// Every label in epLabels except v1.IsHeadlessService and LabelManagedBy should
	// correspond to a label in svcLabels, and svcLabels should not have any other
	// labels that aren't in epLabels.
	skipped := 0
	for k, v := range epLabels {
		if k == v1.IsHeadlessService || k == LabelManagedBy {
			skipped++
		} else if sv, exists := svcLabels[k]; !exists || sv != v {
			return false
		}
	}
	return len(svcLabels) == len(epLabels)-skipped
}