Move node password secrets into dedicated controller

Move the node password secret cleanup into its own dedicated controller
that also handles auth. We now use a filtered cache of only
node-password secrets, instead of using the wrangler secret cache,
which stores all secrets from all namespaces.
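
As a rough illustration, here is a minimal, standalone sketch of that
filtered-cache pattern (mirroring the ListWatch construction used in this
commit, not the exact k3s code). The package, function name, and the
secretType parameter are placeholders; in k3s the type is the
SecretTypeNodePassword constant:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	toolscache "k8s.io/client-go/tools/cache"
)

// newNodePasswordSecretCache starts an informer that lists and watches only
// secrets of the given type in kube-system, so the local store holds just
// node-password secrets instead of every secret in every namespace.
func newNodePasswordSecretCache(ctx context.Context, client kubernetes.Interface, secretType corev1.SecretType) toolscache.Store {
	// The field selector is applied server-side, so other secrets are never
	// sent to this informer at all.
	lw := toolscache.NewListWatchFromClient(
		client.CoreV1().RESTClient(),
		"secrets",
		metav1.NamespaceSystem,
		fields.OneTermEqualSelector("type", string(secretType)),
	)
	store, informer := toolscache.NewInformerWithOptions(toolscache.InformerOptions{
		ListerWatcher: lw,
		ObjectType:    &corev1.Secret{},
		Handler:       &toolscache.ResourceEventHandlerFuncs{},
	})
	go informer.Run(ctx.Done())
	return store
}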

The coredns node-hosts controller also now uses a single-resource
watch cache on the coredns configmap, instead of reading it from
the apiserver every time a node changes.
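
The coredns cache is built the same way, scoped to a single object by name
via fields.OneTermEqualSelector(metav1.ObjectNameField, "coredns") on the
configmaps resource. The standalone sketch below shows only the cached read
side, with a hypothetical helper name; objects returned from an informer
store are shared, which is why the handler DeepCopies the ConfigMap before
calling Update:

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	toolscache "k8s.io/client-go/tools/cache"
)

// cachedCoreDNSConfigMap returns the cached coredns ConfigMap, or nil if it
// is not present in the store yet.
func cachedCoreDNSConfigMap(store toolscache.Store) *corev1.ConfigMap {
	val, ok, err := store.GetByKey(metav1.NamespaceSystem + "/coredns")
	if err != nil || !ok {
		return nil
	}
	cm, ok := val.(*corev1.ConfigMap)
	if !ok {
		return nil
	}
	// The returned object is shared with the informer; DeepCopy it before
	// mutating or passing it to Update.
	return cm
}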

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
Brad Davidson 2025-10-22 08:04:53 +00:00 committed by Brad Davidson
parent 139d64c129
commit d8790220ff
10 changed files with 620 additions and 146 deletions

@@ -133,8 +133,7 @@ func (c *Cluster) setupEtcdProxy(ctx context.Context, etcdProxy etcd.Proxy) {
// deleteNodePasswdSecret wipes out the node password secret after restoration
func (c *Cluster) deleteNodePasswdSecret(ctx context.Context) {
nodeName := os.Getenv("NODE_NAME")
secretsClient := c.config.Runtime.Core.Core().V1().Secret()
if err := nodepassword.Delete(secretsClient, nodeName); err != nil {
if err := nodepassword.Delete(nodeName); err != nil {
if apierrors.IsNotFound(err) {
logrus.Debugf("Node password secret is not found for node %s", nodeName)
return

@@ -7,76 +7,67 @@ import (
"sort"
"strings"
"github.com/k3s-io/k3s/pkg/nodepassword"
pkgerrors "github.com/pkg/errors"
coreclient "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1"
"github.com/sirupsen/logrus"
core "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/client-go/kubernetes"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
toolscache "k8s.io/client-go/tools/cache"
)
func Register(ctx context.Context,
modCoreDNS bool,
secrets coreclient.SecretController,
configMaps coreclient.ConfigMapController,
coreClient kubernetes.Interface,
nodes coreclient.NodeController,
) error {
// create a single-resource watch cache on the coredns configmap so that we
// don't have to retrieve it from the apiserver every time a node changes.
lw := toolscache.NewListWatchFromClient(coreClient.CoreV1().RESTClient(), "configmaps", metav1.NamespaceSystem, fields.OneTermEqualSelector(metav1.ObjectNameField, "coredns"))
informerOpts := toolscache.InformerOptions{ListerWatcher: lw, ObjectType: &corev1.ConfigMap{}, Handler: &toolscache.ResourceEventHandlerFuncs{}}
indexer, informer := toolscache.NewInformerWithOptions(informerOpts)
go informer.Run(ctx.Done())
h := &handler{
modCoreDNS: modCoreDNS,
secrets: secrets,
configMaps: configMaps,
modCoreDNS: modCoreDNS,
ctx: ctx,
configMaps: coreClient.CoreV1().ConfigMaps(metav1.NamespaceSystem),
configMapsStore: indexer,
}
nodes.OnChange(ctx, "node", h.onChange)
nodes.OnRemove(ctx, "node", h.onRemove)
nodes.OnChange(ctx, "node", h.updateHosts)
nodes.OnRemove(ctx, "node", h.updateHosts)
return nil
}
type handler struct {
modCoreDNS bool
secrets coreclient.SecretController
configMaps coreclient.ConfigMapController
modCoreDNS bool
ctx context.Context
configMaps typedcorev1.ConfigMapInterface
configMapsStore toolscache.Store
}
func (h *handler) onChange(key string, node *core.Node) (*core.Node, error) {
if node == nil {
return nil, nil
}
return h.updateHosts(node, false)
}
func (h *handler) onRemove(key string, node *core.Node) (*core.Node, error) {
return h.updateHosts(node, true)
}
func (h *handler) updateHosts(node *core.Node, removed bool) (*core.Node, error) {
var (
nodeName string
hostName string
nodeIPv4 string
nodeIPv6 string
)
nodeName = node.Name
for _, address := range node.Status.Addresses {
switch address.Type {
case v1.NodeInternalIP:
if strings.Contains(address.Address, ":") {
nodeIPv6 = address.Address
} else {
nodeIPv4 = address.Address
func (h *handler) updateHosts(key string, node *corev1.Node) (*corev1.Node, error) {
if h.modCoreDNS && node != nil {
var (
hostName string
nodeIPv4 string
nodeIPv6 string
)
for _, address := range node.Status.Addresses {
switch address.Type {
case corev1.NodeInternalIP:
if strings.Contains(address.Address, ":") {
nodeIPv6 = address.Address
} else {
nodeIPv4 = address.Address
}
case corev1.NodeHostName:
hostName = address.Address
}
case v1.NodeHostName:
hostName = address.Address
}
}
if removed {
if err := h.removeNodePassword(nodeName); err != nil {
logrus.Warn(pkgerrors.WithMessage(err, "Unable to remove node password"))
}
}
if h.modCoreDNS {
if err := h.updateCoreDNSConfigMap(nodeName, hostName, nodeIPv4, nodeIPv6, removed); err != nil {
if err := h.updateCoreDNSConfigMap(node.Name, hostName, nodeIPv4, nodeIPv6, node.DeletionTimestamp != nil); err != nil {
return nil, err
}
}
@@ -97,9 +88,15 @@ func (h *handler) updateCoreDNSConfigMap(nodeName, hostName, nodeIPv4, nodeIPv6
nodeNames += " " + hostName
}
configMap, err := h.configMaps.Get("kube-system", "coredns", metav1.GetOptions{})
if err != nil || configMap == nil {
logrus.Warn(pkgerrors.WithMessage(err, "Unable to fetch coredns config map"))
var configMap *corev1.ConfigMap
if val, ok, err := h.configMapsStore.GetByKey("kube-system/coredns"); err != nil {
logrus.Errorf("Failed to get coredns ConfigMap from cache: %v", err)
} else if ok {
if cm, ok := val.(*corev1.ConfigMap); ok {
configMap = cm
}
}
if configMap == nil {
return nil
}
@@ -147,7 +144,8 @@ func (h *handler) updateCoreDNSConfigMap(nodeName, hostName, nodeIPv4, nodeIPv6
return nil
}
// Something's out of sync, set the desired entries
// Something's out of sync, copy the ConfigMap for update and sync the desired entries
configMap = configMap.DeepCopy()
if nodeIPv4 != "" {
addressMap[nodeIPv4] = namesv4
}
@@ -174,7 +172,7 @@ func (h *handler) updateCoreDNSConfigMap(nodeName, hostName, nodeIPv4, nodeIPv6
}
configMap.Data["NodeHosts"] = newHosts
if _, err := h.configMaps.Update(configMap); err != nil {
if _, err := h.configMaps.Update(h.ctx, configMap, metav1.UpdateOptions{}); err != nil {
return err
}
@@ -182,12 +180,8 @@ func (h *handler) updateCoreDNSConfigMap(nodeName, hostName, nodeIPv4, nodeIPv6
if removed {
actionType = "Removed"
} else {
actionType = "Updated"
actionType = "Synced"
}
logrus.Infof("%s coredns NodeHosts entry for %s", actionType, nodeName)
logrus.Infof("%s coredns NodeHosts entries for %s", actionType, nodeName)
return nil
}
func (h *handler) removeNodePassword(nodeName string) error {
return nodepassword.Delete(h.secrets, nodeName)
}

@@ -0,0 +1,179 @@
package nodepassword
import (
"context"
"errors"
"strings"
"time"
"github.com/k3s-io/k3s/pkg/secretsencrypt"
coreclient "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
toolscache "k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/pager"
)
// MinOrphanSecretAge is the minimum age for an orphaned node-password secret to be cleaned up.
// Since the secret is created when the supervisor requests certs from the server, but the
// node is not created until after containerd and the kubelet start, we need to allow a reasonable
// amount of time before cleaning it up.
var MinOrphanSecretAge = 10 * time.Minute
// controller holds a reference to the last registered nodePasswordController,
// so that the node password validator can share its caches.
var controller *nodePasswordController
func Register(ctx context.Context, coreClient kubernetes.Interface, secrets coreclient.SecretController, nodes coreclient.NodeController) error {
// start a cache that watches only node-password secrets in the kube-system namespace
lw := toolscache.NewListWatchFromClient(coreClient.CoreV1().RESTClient(), "secrets", metav1.NamespaceSystem, fields.OneTermEqualSelector("type", string(SecretTypeNodePassword)))
informerOpts := toolscache.InformerOptions{ListerWatcher: lw, ObjectType: &corev1.Secret{}, Handler: &toolscache.ResourceEventHandlerFuncs{}}
indexer, informer := toolscache.NewInformerWithOptions(informerOpts)
npc := &nodePasswordController{
nodes: nodes,
secrets: secrets,
secretsStore: indexer,
}
// migrate legacy secrets over to the new type. this must not be fatal, as
// there may be validating webhooks that prevent deleting or creating secrets
// until the cluster is up. ref: github.com/k3s-io/k3s/issues/7654
if err := npc.migrateSecrets(ctx); err != nil {
logrus.Errorf("Failed to migrate node-password secrets: %v", err)
}
nodes.OnChange(ctx, "node-password", npc.onChangeNode)
go informer.Run(ctx.Done())
go wait.UntilWithContext(ctx, npc.sync, time.Minute)
controller = npc
return nil
}
type nodePasswordController struct {
nodes coreclient.NodeController
secrets coreclient.SecretController
secretsStore toolscache.Store
}
// onChangeNode ensures that the node password secret has an OwnerReference to its
// node, after the node has been created. This will ensure that the garbage
// collector removes the secret once the owning node is deleted.
func (npc *nodePasswordController) onChangeNode(key string, node *corev1.Node) (*corev1.Node, error) {
if node == nil {
return node, nil
}
secret, err := npc.getSecret(node.Name, true)
if err != nil {
if apierrors.IsNotFound(err) {
return node, nil
}
return nil, err
}
for _, ref := range secret.OwnerReferences {
if ref.APIVersion == node.APIVersion && ref.Kind == node.Kind && ref.Name == node.Name && ref.UID == node.UID {
return node, nil
}
}
logrus.Infof("Adding node OwnerReference to node-password secret %s", secret.Name)
secret = secret.DeepCopy()
secret.OwnerReferences = append(secret.OwnerReferences, metav1.OwnerReference{
APIVersion: node.APIVersion,
Kind: node.Kind,
Name: node.Name,
UID: node.UID,
})
_, err = npc.secrets.Update(secret)
return node, err
}
// sync deletes all node password secrets older than the configured time that
// do not have a corresponding node. Garbage collection should handle secrets
// for nodes that were deleted, so this cleanup is mostly for nodes that
// requested certificates but never successfully joined the cluster.
func (npc *nodePasswordController) sync(ctx context.Context) {
if !npc.nodes.Informer().HasSynced() {
return
}
minCreateTime := time.Now().Add(-MinOrphanSecretAge)
nodeSecretNames := sets.Set[string]{}
for _, nodeName := range npc.nodes.Informer().GetStore().ListKeys() {
nodeSecretNames = nodeSecretNames.Insert(getSecretName(nodeName))
}
for _, s := range npc.secretsStore.List() {
secret, ok := s.(*corev1.Secret)
if !ok || secret.CreationTimestamp.After(minCreateTime) || nodeSecretNames.Has(secret.Name) {
continue
}
if err := npc.secrets.Delete(secret.Namespace, secret.Name, &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &secret.UID}}); err != nil {
logrus.Errorf("Failed to delete orphaned node-password secret %s: %v", secret.Name, err)
} else {
logrus.Warnf("Deleted orphaned node-password secret %s created %s", secret.Name, secret.CreationTimestamp)
}
}
}
// migrateSecrets recreates legacy node password secrets with the correct type
func (npc *nodePasswordController) migrateSecrets(ctx context.Context) error {
secretSuffix := getSecretName("")
secretPager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
return npc.secrets.List(metav1.NamespaceSystem, opts)
}))
secretPager.PageSize = secretsencrypt.SecretListPageSize
return secretPager.EachListItem(ctx, metav1.ListOptions{}, func(obj runtime.Object) error {
secret, ok := obj.(*corev1.Secret)
if !ok {
return errors.New("failed to convert object to Secret")
}
// skip migrating secrets that already have the correct type, or are not a node password secret
if secret.Type == SecretTypeNodePassword || !strings.HasSuffix(secret.Name, secretSuffix) {
return nil
}
// delete the old object, and create a new one with the correct type -
// we have to delete and re-create because the type field is immutable
logrus.Infof("Migrating node-password secret %s", secret.Name)
deleteOpts := &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &secret.ObjectMeta.UID}}
if err := npc.secrets.Delete(secret.Namespace, secret.Name, deleteOpts); err != nil && !apierrors.IsConflict(err) && !apierrors.IsNotFound(err) {
return err
}
newSecret := secret.DeepCopy()
newSecret.ObjectMeta.UID = ""
newSecret.ObjectMeta.ResourceVersion = ""
newSecret.Type = SecretTypeNodePassword
if _, err := npc.secrets.Create(newSecret); err != nil && !apierrors.IsAlreadyExists(err) {
return err
}
return nil
})
}
// getSecret is a helper function to get a node password secret from the store,
// or directly from the apiserver.
func (npc *nodePasswordController) getSecret(nodeName string, cached bool) (*corev1.Secret, error) {
if cached {
name := metav1.NamespaceSystem + "/" + getSecretName(nodeName)
val, ok, err := npc.secretsStore.GetByKey(name)
if err != nil {
return nil, err
}
if !ok {
return nil, apierrors.NewNotFound(schema.GroupResource{Resource: "secret"}, name)
}
s, ok := val.(*corev1.Secret)
if !ok {
return nil, errors.New("failed to convert object to Secret")
}
return s, nil
}
return npc.secrets.Get(metav1.NamespaceSystem, getSecretName(nodeName), metav1.GetOptions{})
}

@@ -1,13 +1,15 @@
package nodepassword
import (
"context"
"errors"
"fmt"
"strings"
"github.com/k3s-io/k3s/pkg/authenticator/hash"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/version"
coreclient "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1"
pkgerrors "github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -49,9 +51,8 @@ func getSecretName(nodeName string) string {
return strings.ToLower(nodeName + ".node-password." + version.Program)
}
func verifyHash(secretClient coreclient.SecretController, nodeName, pass string) error {
name := getSecretName(nodeName)
secret, err := secretClient.Cache().Get(metav1.NamespaceSystem, name)
func (npc *nodePasswordController) verifyHash(nodeName, pass string, cached bool) error {
secret, err := npc.getSecret(nodeName, cached)
if err != nil {
return &passwordError{node: nodeName, err: err}
}
@@ -64,16 +65,16 @@ func verifyHash(secretClient coreclient.SecretController, nodeName, pass string)
return &passwordError{node: nodeName, err: errors.New("password hash not found in node secret")}
}
// Ensure will verify a node-password secret if it exists, otherwise it will create one
func Ensure(secretClient coreclient.SecretController, nodeName, pass string) error {
err := verifyHash(secretClient, nodeName, pass)
// ensure will verify a node-password secret if it exists, otherwise it will create one
func (npc *nodePasswordController) ensure(nodeName, pass string) error {
err := npc.verifyHash(nodeName, pass, true)
if apierrors.IsNotFound(err) {
var hash string
hash, err = Hasher.CreateHash(pass)
if err != nil {
return &passwordError{node: nodeName, err: err}
}
_, err = secretClient.Create(&v1.Secret{
_, err = npc.secrets.Create(&v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: getSecretName(nodeName),
Namespace: metav1.NamespaceSystem,
@@ -82,11 +83,29 @@ func Ensure(secretClient coreclient.SecretController, nodeName, pass string) err
Data: map[string][]byte{"hash": []byte(hash)},
Type: SecretTypeNodePassword,
})
if apierrors.IsAlreadyExists(err) {
// secret already exists, try to verify again without cache
return npc.verifyHash(nodeName, pass, false)
}
}
return err
}
// Delete will remove a node-password secret
func Delete(secretClient coreclient.SecretController, nodeName string) error {
return secretClient.Delete(metav1.NamespaceSystem, getSecretName(nodeName), &metav1.DeleteOptions{})
// verifyNode confirms that a node with the given name exists, to prevent auth
// from succeeding with a client certificate for a node that has been deleted from the cluster.
func (npc *nodePasswordController) verifyNode(ctx context.Context, node *nodeInfo) error {
if nodeName, isNodeAuth := identifier.NodeIdentity(node.User); isNodeAuth {
if _, err := npc.nodes.Cache().Get(nodeName); err != nil {
return pkgerrors.WithMessage(err, "unable to verify node identity")
}
}
return nil
}
// Delete uses the controller to delete the secret for a node, if the controller has been started
func Delete(nodeName string) error {
if controller == nil {
return util.ErrCoreNotReady
}
return controller.secrets.Delete(metav1.NamespaceSystem, getSecretName(nodeName), &metav1.DeleteOptions{})
}

@@ -8,8 +8,6 @@ import (
"runtime"
"testing"
"github.com/k3s-io/k3s/tests/mock"
"go.uber.org/mock/gomock"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
)
@@ -22,37 +20,6 @@ func Test_UnitAsserts(t *testing.T) {
assertNotEqual(t, 1, 0)
}
func Test_UnitEnsureDelete(t *testing.T) {
logMemUsage(t)
v1Mock := mock.NewV1(gomock.NewController(t))
secretClient := v1Mock.SecretMock
secretCache := v1Mock.SecretCache
secretStore := &mock.SecretStore{}
// Set up expected call counts for tests
// Expect to see 2 creates, any number of cache gets, and 2 deletes.
secretClient.EXPECT().Create(gomock.Any()).Times(2).DoAndReturn(secretStore.Create)
secretClient.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any()).Times(2).DoAndReturn(secretStore.Delete)
secretClient.EXPECT().Cache().AnyTimes().Return(secretCache)
secretCache.EXPECT().Get(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(secretStore.Get)
// Run tests
assertEqual(t, Ensure(secretClient, "node1", "Hello World"), nil)
assertEqual(t, Ensure(secretClient, "node1", "Hello World"), nil)
assertNotEqual(t, Ensure(secretClient, "node1", "Goodbye World"), nil)
assertEqual(t, Delete(secretClient, "node1"), nil)
assertNotEqual(t, Delete(secretClient, "node1"), nil)
assertEqual(t, Ensure(secretClient, "node1", "Hello Universe"), nil)
assertNotEqual(t, Ensure(secretClient, "node1", "Hello World"), nil)
assertEqual(t, Ensure(secretClient, "node1", "Hello Universe"), nil)
logMemUsage(t)
}
func Test_PasswordError(t *testing.T) {
err := &passwordError{node: "test", err: fmt.Errorf("inner error")}
assertEqual(t, errors.Is(err, ErrVerifyFailed), true)

@@ -15,7 +15,6 @@ import (
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/util"
pkgerrors "github.com/pkg/errors"
coreclient "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
@@ -44,10 +43,7 @@ type nodeInfo struct {
// These checks prevent a user with access to one agent from requesting kubelet certificates that
// could be used to impersonate another cluster member.
func GetNodeAuthValidator(ctx context.Context, control *config.Control) NodeAuthValidator {
runtime := control.Runtime
deferredNodes := map[string]bool{}
var secretClient coreclient.SecretController
var nodeClient coreclient.NodeController
var mu sync.Mutex
return func(req *http.Request) (string, int, error) {
@@ -63,12 +59,8 @@ func GetNodeAuthValidator(ctx context.Context, control *config.Control) NodeAuth
return "", http.StatusBadRequest, errors.New("header node name does not match auth node name")
}
if secretClient == nil || nodeClient == nil {
if runtime.Core != nil {
// initialize the client if we can
secretClient = runtime.Core.Core().V1().Secret()
nodeClient = runtime.Core.Core().V1().Node()
} else if node.Name == os.Getenv("NODE_NAME") {
if controller == nil {
if node.Name == os.Getenv("NODE_NAME") {
// If we're verifying our own password, verify it locally and ensure a secret later.
return verifyLocalPassword(ctx, control, &mu, deferredNodes, node)
} else if control.DisableAPIServer && !isNodeAuth {
@@ -76,18 +68,18 @@ func GetNodeAuthValidator(ctx context.Context, control *config.Control) NodeAuth
// defer node password verification until an apiserver joins the cluster.
return verifyRemotePassword(ctx, control, &mu, deferredNodes, node)
} else {
// Otherwise, reject the request until the core is ready.
// Otherwise, reject the request until the controller is ready.
return "", http.StatusServiceUnavailable, util.ErrCoreNotReady
}
}
// verify that the node exists, if using Node Identity auth
if err := verifyNode(ctx, nodeClient, node); err != nil {
if err := controller.verifyNode(ctx, node); err != nil {
return "", http.StatusUnauthorized, err
}
// verify that the node password secret matches, or create it if it does not
if err := Ensure(secretClient, node.Name, node.Password); err != nil {
if err := controller.ensure(node.Name, node.Password); err != nil {
// if the verification failed, reject the request
if errors.Is(err, ErrVerifyFailed) {
return "", http.StatusForbidden, err
@@ -97,6 +89,8 @@ func GetNodeAuthValidator(ctx context.Context, control *config.Control) NodeAuth
// allows nodes to join the cluster during outages caused by validating webhooks
// blocking secret creation - if the outage requires new nodes to join in order to
// run the webhook pods, we must fail open here to resolve the outage.
// ref: github.com/k3s-io/k3s/issues/7654
logrus.Warnf("Failed to ensure node-password secret for node %s: %v", node.Name, err)
return verifyRemotePassword(ctx, control, &mu, deferredNodes, node)
}
@@ -189,24 +183,11 @@ func verifyRemotePassword(ctx context.Context, control *config.Control, mu *sync
return node.Name, http.StatusOK, nil
}
// verifyNode confirms that a node with the given name exists, to prevent auth
// from succeeding with a client certificate for a node that has been deleted from the cluster.
func verifyNode(ctx context.Context, nodeClient coreclient.NodeController, node *nodeInfo) error {
if nodeName, isNodeAuth := identifier.NodeIdentity(node.User); isNodeAuth {
if _, err := nodeClient.Cache().Get(nodeName); err != nil {
return pkgerrors.WithMessage(err, "unable to verify node identity")
}
}
return nil
}
// ensureSecret validates a server's node password secret once the apiserver is up.
// As the node has already joined the cluster at this point, this is purely informational.
func ensureSecret(ctx context.Context, control *config.Control, node *nodeInfo) {
runtime := control.Runtime
_ = wait.PollUntilContextCancel(ctx, time.Second*5, true, func(ctx context.Context) (bool, error) {
if runtime.Core != nil {
secretClient := runtime.Core.Core().V1().Secret()
if controller != nil {
// This is consistent with events attached to the node generated by the kubelet
// https://github.com/kubernetes/kubernetes/blob/612130dd2f4188db839ea5c2dea07a96b0ad8d1c/pkg/kubelet/kubelet.go#L479-L485
nodeRef := &corev1.ObjectReference{
@@ -215,12 +196,12 @@ func ensureSecret(ctx context.Context, control *config.Control, node *nodeInfo)
UID: types.UID(node.Name),
Namespace: "",
}
if err := Ensure(secretClient, node.Name, node.Password); err != nil {
runtime.Event.Eventf(nodeRef, corev1.EventTypeWarning, "NodePasswordValidationFailed", "Deferred node password secret validation failed: %v", err)
if err := controller.ensure(node.Name, node.Password); err != nil {
control.Runtime.Event.Eventf(nodeRef, corev1.EventTypeWarning, "NodePasswordValidationFailed", "Deferred node password secret validation failed: %v", err)
// Return true to stop polling if the password verification failed; only retry on secret creation errors.
return errors.Is(err, ErrVerifyFailed), nil
}
runtime.Event.Event(nodeRef, corev1.EventTypeNormal, "NodePasswordValidationComplete", "Deferred node password secret validation complete")
control.Runtime.Event.Event(nodeRef, corev1.EventTypeNormal, "NodePasswordValidationComplete", "Deferred node password secret validation complete")
return true, nil
}
return false, nil

@@ -21,6 +21,10 @@ import (
"github.com/k3s-io/k3s/pkg/authenticator"
"github.com/k3s-io/k3s/pkg/cli/cmds"
"github.com/k3s-io/k3s/pkg/daemons/config"
"github.com/k3s-io/k3s/pkg/nodepassword"
"github.com/k3s-io/k3s/pkg/util"
"github.com/k3s-io/k3s/pkg/util/logger"
"github.com/k3s-io/k3s/pkg/version"
testutil "github.com/k3s-io/k3s/tests"
"github.com/k3s-io/k3s/tests/mock"
. "github.com/onsi/gomega"
@@ -31,10 +35,14 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
)
func init() {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetLevel(logrus.TraceLevel)
logrus.SetFormatter(&logrus.TextFormatter{DisableQuote: true})
klog.SetLoggerWithOptions(logger.NewLogrusSink(nil).AsLogr(), klog.ContextualLogger(true))
}
func Test_UnitHandlers(t *testing.T) {
@@ -1610,6 +1618,7 @@ func Test_UnitHandlers(t *testing.T) {
// getCorelessControl returns a Control structure with no mocked core controllers,
// as if the apiserver were not yet available.
func getCorelessControl(t *testing.T) (*config.Control, context.CancelFunc) {
g := NewWithT(t)
ctx, cancel := context.WithCancel(context.Background())
control := &config.Control{
Token: "token",
@@ -1639,7 +1648,7 @@ func getCorelessControl(t *testing.T) (*config.Control, context.CancelFunc) {
"--basic-auth-file=" + control.Runtime.PasswdFile,
"--client-ca-file=" + control.Runtime.ClientCA,
})
NewWithT(t).Expect(err).ToNot(HaveOccurred())
g.Expect(err).ToNot(HaveOccurred())
control.Runtime.Authenticator = auth
// finally, bind request handlers
@@ -1651,6 +1660,7 @@ func getCorelessControl(t *testing.T) (*config.Control, context.CancelFunc) {
// getCorelessAgentlessControl returns a Control structure with no mocked core controllers,
// as if the apiserver were not yet available on a node with no local agent.
func getCorelessAgentlessControl(t *testing.T) (*config.Control, context.CancelFunc) {
g := NewWithT(t)
ctx, cancel := context.WithCancel(context.Background())
control := &config.Control{
Token: "token",
@@ -1674,7 +1684,7 @@ func getCorelessAgentlessControl(t *testing.T) (*config.Control, context.CancelF
"--basic-auth-file=" + control.Runtime.PasswdFile,
"--client-ca-file=" + control.Runtime.ClientCA,
})
NewWithT(t).Expect(err).ToNot(HaveOccurred())
g.Expect(err).ToNot(HaveOccurred())
control.Runtime.Authenticator = auth
// finally, bind request handlers
@@ -1686,6 +1696,7 @@ func getCorelessAgentlessControl(t *testing.T) (*config.Control, context.CancelF
// getMockedControl returns a Control structure with mocked core controllers in place
// of a full functional datastore and apiserver.
func getMockedControl(t *testing.T) (*config.Control, context.CancelFunc) {
g := NewWithT(t)
ctx, cancel := context.WithCancel(context.Background())
control := &config.Control{
Token: "token",
@@ -1701,6 +1712,18 @@ func getMockedControl(t *testing.T) (*config.Control, context.CancelFunc) {
// setting up a whole remotedialer tunnel server here
control.Runtime.Tunnel = http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {})
// While creating a RESTConfig and client will succeed as long as the kubeconfig is valid,
// the client created here will not be usable as the apiserver is not started for testing.
// We create a real client instead of using k8s.io/client-go/kubernetes/fake because fake's
// RESTClient() calls all return nil, which causes ListWatch users to panic. With a real
// client, the calls just fail, which is fine for what we're doing here.
restConfig, err := util.GetRESTConfig(control.Runtime.KubeConfigSupervisor)
g.Expect(err).ToNot(HaveOccurred())
restConfig.UserAgent = util.GetUserAgent(version.Program + "-supervisor")
k8s, err := kubernetes.NewForConfig(restConfig)
g.Expect(err).ToNot(HaveOccurred())
// wire up mock controllers and cache stores
secretStore := &mock.SecretStore{}
nodeStore := &mock.NodeStore{}
@@ -1708,20 +1731,32 @@ func getMockedControl(t *testing.T) (*config.Control, context.CancelFunc) {
nodeStore.Create(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "k3s-agent-1"}})
ctrl := gomock.NewController(t)
informer := mock.NewSharedIndexInformer(ctrl)
informer.EXPECT().HasSynced().AnyTimes().Return(false)
coreFactory := mock.NewCoreFactory(ctrl)
coreFactory.CoreMock.V1Mock.SecretMock.EXPECT().Cache().AnyTimes().Return(coreFactory.CoreMock.V1Mock.SecretCache)
coreFactory.CoreMock.V1Mock.SecretMock.EXPECT().Create(gomock.Any()).AnyTimes().DoAndReturn(secretStore.Create)
coreFactory.CoreMock.V1Mock.SecretCache.EXPECT().Get(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(secretStore.Get)
coreFactory.CoreMock.V1Mock.SecretMock.EXPECT().List(gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(secretStore.ListWithOptions)
coreFactory.CoreMock.V1Mock.SecretMock.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().DoAndReturn(secretStore.GetWithOptions)
coreFactory.CoreMock.V1Mock.NodeMock.EXPECT().OnChange(gomock.Any(), gomock.Any(), gomock.Any()).Times(1)
coreFactory.CoreMock.V1Mock.NodeMock.EXPECT().Cache().AnyTimes().Return(coreFactory.CoreMock.V1Mock.NodeCache)
coreFactory.CoreMock.V1Mock.NodeMock.EXPECT().Informer().AnyTimes().Return(informer)
coreFactory.CoreMock.V1Mock.NodeCache.EXPECT().Get(gomock.Any()).AnyTimes().DoAndReturn(nodeStore.Get)
control.Runtime.Core = coreFactory
control.Runtime.K8s = k8s
// create event recorder
control.Runtime.Event = util.BuildControllerEventRecorder(control.Runtime.K8s, version.Program+"-supervisor", metav1.NamespaceAll)
// start the node password controller
err = nodepassword.Register(ctx, control.Runtime.K8s, coreFactory.Core().V1().Secret(), coreFactory.Core().V1().Node())
g.Expect(err).ToNot(HaveOccurred())
// add authenticator
auth, err := authenticator.FromArgs([]string{
"--basic-auth-file=" + control.Runtime.PasswdFile,
"--client-ca-file=" + control.Runtime.ClientCA,
})
NewWithT(t).Expect(err).ToNot(HaveOccurred())
g.Expect(err).ToNot(HaveOccurred())
control.Runtime.Authenticator = auth
// finally, bind request handlers

@@ -21,6 +21,7 @@ import (
"github.com/k3s-io/k3s/pkg/datadir"
"github.com/k3s-io/k3s/pkg/deploy"
"github.com/k3s-io/k3s/pkg/node"
"github.com/k3s-io/k3s/pkg/nodepassword"
"github.com/k3s-io/k3s/pkg/rootlessports"
"github.com/k3s-io/k3s/pkg/secretsencrypt"
"github.com/k3s-io/k3s/pkg/server/handlers"
@@ -117,6 +118,11 @@ func runControllers(ctx context.Context, config *Config) error {
return pkgerrors.WithMessage(err, "failed to stage files")
}
// start the nodepassword controller before we set controlConfig.Runtime.Core
if err := nodepassword.Register(ctx, sc.K8s, sc.Core.Core().V1().Secret(), sc.Core.Core().V1().Node()); err != nil {
return pkgerrors.WithMessage(err, "failed to start node-password secret controller")
}
controlConfig.Runtime.K8s = sc.K8s
controlConfig.Runtime.K3s = sc.K3s
controlConfig.Runtime.Event = sc.Event
@@ -198,7 +204,7 @@ func runOrDie(ctx context.Context, name string, cb leader.Callback) {
}
// coreControllers starts the following controllers, if they are enabled:
// * Node controller (manages nodes passwords and coredns hosts file)
// * Node controller (manages coredns node hosts file)
// * Helm controller
// * Secrets encryption
// * Rootless ports
@@ -206,8 +212,7 @@ func runOrDie(ctx context.Context, name string, cb leader.Callback) {
func coreControllers(ctx context.Context, sc *Context, config *Config) error {
if err := node.Register(ctx,
!config.ControlConfig.Skips["coredns"],
sc.Core.Core().V1().Secret(),
sc.Core.Core().V1().ConfigMap(),
sc.K8s,
sc.Core.Core().V1().Node()); err != nil {
return err
}

@@ -207,6 +207,35 @@ func (m *SecretStore) Get(namespace, name string) (*v1.Secret, error) {
return nil, ErrorNotFound("secret", name)
}
func (m *SecretStore) GetWithOptions(namespace, name string, opts metav1.GetOptions) (*v1.Secret, error) {
return m.Get(namespace, name)
}
func (m *SecretStore) List(namespace string, ls labels.Selector) ([]v1.Secret, error) {
secrets := []v1.Secret{}
if ls == nil {
ls = labels.Everything()
}
for _, secret := range m.secrets[namespace] {
if ls.Matches(labels.Set(secret.Labels)) {
secrets = append(secrets, secret)
}
}
return secrets, nil
}
func (m *SecretStore) ListWithOptions(namespace string, opts metav1.ListOptions) (*v1.SecretList, error) {
ls, err := labels.Parse(opts.LabelSelector)
if err != nil {
return nil, err
}
secrets, err := m.List(namespace, ls)
if err != nil {
return nil, err
}
return &v1.SecretList{Items: secrets}, nil
}
// mock node store interface
type NodeStore struct {

@@ -0,0 +1,266 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: k8s.io/client-go/tools/cache (interfaces: SharedIndexInformer)
//
// Generated by this command:
//
// mockgen -self_package github.com/k3s-io/k3s/tests/mock -package mock -mock_names SharedIndexInformer=SharedIndexInformer k8s.io/client-go/tools/cache SharedIndexInformer
//
// Package mock is a generated GoMock package.
package mock
import (
context "context"
reflect "reflect"
time "time"
gomock "go.uber.org/mock/gomock"
cache "k8s.io/client-go/tools/cache"
)
// SharedIndexInformer is a mock of SharedIndexInformer interface.
type SharedIndexInformer struct {
ctrl *gomock.Controller
recorder *SharedIndexInformerMockRecorder
isgomock struct{}
}
// SharedIndexInformerMockRecorder is the mock recorder for SharedIndexInformer.
type SharedIndexInformerMockRecorder struct {
mock *SharedIndexInformer
}
// NewSharedIndexInformer creates a new mock instance.
func NewSharedIndexInformer(ctrl *gomock.Controller) *SharedIndexInformer {
mock := &SharedIndexInformer{ctrl: ctrl}
mock.recorder = &SharedIndexInformerMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *SharedIndexInformer) EXPECT() *SharedIndexInformerMockRecorder {
return m.recorder
}
// AddEventHandler mocks base method.
func (m *SharedIndexInformer) AddEventHandler(handler cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddEventHandler", handler)
ret0, _ := ret[0].(cache.ResourceEventHandlerRegistration)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// AddEventHandler indicates an expected call of AddEventHandler.
func (mr *SharedIndexInformerMockRecorder) AddEventHandler(handler any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddEventHandler", reflect.TypeOf((*SharedIndexInformer)(nil).AddEventHandler), handler)
}
// AddEventHandlerWithOptions mocks base method.
func (m *SharedIndexInformer) AddEventHandlerWithOptions(handler cache.ResourceEventHandler, options cache.HandlerOptions) (cache.ResourceEventHandlerRegistration, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddEventHandlerWithOptions", handler, options)
ret0, _ := ret[0].(cache.ResourceEventHandlerRegistration)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// AddEventHandlerWithOptions indicates an expected call of AddEventHandlerWithOptions.
func (mr *SharedIndexInformerMockRecorder) AddEventHandlerWithOptions(handler, options any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddEventHandlerWithOptions", reflect.TypeOf((*SharedIndexInformer)(nil).AddEventHandlerWithOptions), handler, options)
}
// AddEventHandlerWithResyncPeriod mocks base method.
func (m *SharedIndexInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) (cache.ResourceEventHandlerRegistration, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddEventHandlerWithResyncPeriod", handler, resyncPeriod)
ret0, _ := ret[0].(cache.ResourceEventHandlerRegistration)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// AddEventHandlerWithResyncPeriod indicates an expected call of AddEventHandlerWithResyncPeriod.
func (mr *SharedIndexInformerMockRecorder) AddEventHandlerWithResyncPeriod(handler, resyncPeriod any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddEventHandlerWithResyncPeriod", reflect.TypeOf((*SharedIndexInformer)(nil).AddEventHandlerWithResyncPeriod), handler, resyncPeriod)
}
// AddIndexers mocks base method.
func (m *SharedIndexInformer) AddIndexers(indexers cache.Indexers) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddIndexers", indexers)
ret0, _ := ret[0].(error)
return ret0
}
// AddIndexers indicates an expected call of AddIndexers.
func (mr *SharedIndexInformerMockRecorder) AddIndexers(indexers any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddIndexers", reflect.TypeOf((*SharedIndexInformer)(nil).AddIndexers), indexers)
}
// GetController mocks base method.
func (m *SharedIndexInformer) GetController() cache.Controller {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetController")
ret0, _ := ret[0].(cache.Controller)
return ret0
}
// GetController indicates an expected call of GetController.
func (mr *SharedIndexInformerMockRecorder) GetController() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetController", reflect.TypeOf((*SharedIndexInformer)(nil).GetController))
}
// GetIndexer mocks base method.
func (m *SharedIndexInformer) GetIndexer() cache.Indexer {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetIndexer")
ret0, _ := ret[0].(cache.Indexer)
return ret0
}
// GetIndexer indicates an expected call of GetIndexer.
func (mr *SharedIndexInformerMockRecorder) GetIndexer() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIndexer", reflect.TypeOf((*SharedIndexInformer)(nil).GetIndexer))
}
// GetStore mocks base method.
func (m *SharedIndexInformer) GetStore() cache.Store {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetStore")
ret0, _ := ret[0].(cache.Store)
return ret0
}
// GetStore indicates an expected call of GetStore.
func (mr *SharedIndexInformerMockRecorder) GetStore() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStore", reflect.TypeOf((*SharedIndexInformer)(nil).GetStore))
}
// HasSynced mocks base method.
func (m *SharedIndexInformer) HasSynced() bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "HasSynced")
ret0, _ := ret[0].(bool)
return ret0
}
// HasSynced indicates an expected call of HasSynced.
func (mr *SharedIndexInformerMockRecorder) HasSynced() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasSynced", reflect.TypeOf((*SharedIndexInformer)(nil).HasSynced))
}
// IsStopped mocks base method.
func (m *SharedIndexInformer) IsStopped() bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IsStopped")
ret0, _ := ret[0].(bool)
return ret0
}
// IsStopped indicates an expected call of IsStopped.
func (mr *SharedIndexInformerMockRecorder) IsStopped() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsStopped", reflect.TypeOf((*SharedIndexInformer)(nil).IsStopped))
}
// LastSyncResourceVersion mocks base method.
func (m *SharedIndexInformer) LastSyncResourceVersion() string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "LastSyncResourceVersion")
ret0, _ := ret[0].(string)
return ret0
}
// LastSyncResourceVersion indicates an expected call of LastSyncResourceVersion.
func (mr *SharedIndexInformerMockRecorder) LastSyncResourceVersion() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastSyncResourceVersion", reflect.TypeOf((*SharedIndexInformer)(nil).LastSyncResourceVersion))
}
// RemoveEventHandler mocks base method.
func (m *SharedIndexInformer) RemoveEventHandler(handle cache.ResourceEventHandlerRegistration) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "RemoveEventHandler", handle)
ret0, _ := ret[0].(error)
return ret0
}
// RemoveEventHandler indicates an expected call of RemoveEventHandler.
func (mr *SharedIndexInformerMockRecorder) RemoveEventHandler(handle any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveEventHandler", reflect.TypeOf((*SharedIndexInformer)(nil).RemoveEventHandler), handle)
}
// Run mocks base method.
func (m *SharedIndexInformer) Run(stopCh <-chan struct{}) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "Run", stopCh)
}
// Run indicates an expected call of Run.
func (mr *SharedIndexInformerMockRecorder) Run(stopCh any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Run", reflect.TypeOf((*SharedIndexInformer)(nil).Run), stopCh)
}
// RunWithContext mocks base method.
func (m *SharedIndexInformer) RunWithContext(ctx context.Context) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "RunWithContext", ctx)
}
// RunWithContext indicates an expected call of RunWithContext.
func (mr *SharedIndexInformerMockRecorder) RunWithContext(ctx any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunWithContext", reflect.TypeOf((*SharedIndexInformer)(nil).RunWithContext), ctx)
}
// SetTransform mocks base method.
func (m *SharedIndexInformer) SetTransform(handler cache.TransformFunc) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetTransform", handler)
ret0, _ := ret[0].(error)
return ret0
}
// SetTransform indicates an expected call of SetTransform.
func (mr *SharedIndexInformerMockRecorder) SetTransform(handler any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTransform", reflect.TypeOf((*SharedIndexInformer)(nil).SetTransform), handler)
}
// SetWatchErrorHandler mocks base method.
func (m *SharedIndexInformer) SetWatchErrorHandler(handler cache.WatchErrorHandler) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetWatchErrorHandler", handler)
ret0, _ := ret[0].(error)
return ret0
}
// SetWatchErrorHandler indicates an expected call of SetWatchErrorHandler.
func (mr *SharedIndexInformerMockRecorder) SetWatchErrorHandler(handler any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWatchErrorHandler", reflect.TypeOf((*SharedIndexInformer)(nil).SetWatchErrorHandler), handler)
}
// SetWatchErrorHandlerWithContext mocks base method.
func (m *SharedIndexInformer) SetWatchErrorHandlerWithContext(handler cache.WatchErrorHandlerWithContext) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetWatchErrorHandlerWithContext", handler)
ret0, _ := ret[0].(error)
return ret0
}
// SetWatchErrorHandlerWithContext indicates an expected call of SetWatchErrorHandlerWithContext.
func (mr *SharedIndexInformerMockRecorder) SetWatchErrorHandlerWithContext(handler any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWatchErrorHandlerWithContext", reflect.TypeOf((*SharedIndexInformer)(nil).SetWatchErrorHandlerWithContext), handler)
}