Mirror of https://github.com/kubernetes/kubernetes.git (synced 2026-02-03 20:40:26 -05:00)

Pod Certificates: Basic implementation

* Define feature gate
* Define and serve PodCertificateRequest
* Implement Kubelet projected volume source
* kube-controller-manager GCs PodCertificateRequests
* Add agnhost subcommand that implements a toy signer for testing

Change-Id: Id7ed030d449806410a4fa28aab0f2ce4e01d3b10

This commit is contained in: parent c44bf18b9b, commit 4624cb9bb9.
73 changed files with 10124 additions and 108 deletions
@@ -22,6 +22,7 @@ package app

import (
	"context"
	"fmt"
	"time"

	certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
	certificatesv1beta1 "k8s.io/api/certificates/v1beta1"

@@ -41,6 +42,7 @@ import (
	"k8s.io/kubernetes/pkg/controller/certificates/signer"
	csrsigningconfig "k8s.io/kubernetes/pkg/controller/certificates/signer/config"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/utils/clock"
)

func newCertificateSigningRequestSigningControllerDescriptor() *ControllerDescriptor {

@@ -201,6 +203,28 @@ func startCertificateSigningRequestCleanerController(ctx context.Context, contro
	return nil, true, nil
}

func newPodCertificateRequestCleanerControllerDescriptor() *ControllerDescriptor {
	return &ControllerDescriptor{
		name:     names.PodCertificateRequestCleanerController,
		initFunc: startPodCertificateRequestCleanerController,
		requiredFeatureGates: []featuregate.Feature{
			features.PodCertificateRequest,
		},
	}
}

func startPodCertificateRequestCleanerController(ctx context.Context, controllerContext ControllerContext, controllerName string) (controller.Interface, bool, error) {
	cleaner := cleaner.NewPCRCleanerController(
		controllerContext.ClientBuilder.ClientOrDie("podcertificaterequestcleaner"),
		controllerContext.InformerFactory.Certificates().V1alpha1().PodCertificateRequests(),
		clock.RealClock{},
		15*time.Minute, // We expect all PodCertificateRequest flows to complete faster than this.
		5*time.Minute,
	)
	go cleaner.Run(ctx, 1)
	return nil, true, nil
}

func newRootCACertificatePublisherControllerDescriptor() *ControllerDescriptor {
	return &ControllerDescriptor{
		name: names.RootCACertificatePublisherController,
@@ -565,6 +565,7 @@ func NewControllerDescriptors() map[string]*ControllerDescriptor {
	register(newCertificateSigningRequestSigningControllerDescriptor())
	register(newCertificateSigningRequestApprovingControllerDescriptor())
	register(newCertificateSigningRequestCleanerControllerDescriptor())
	register(newPodCertificateRequestCleanerControllerDescriptor())
	register(newTTLControllerDescriptor())
	register(newBootstrapSignerControllerDescriptor())
	register(newTokenCleanerControllerDescriptor())
@@ -72,6 +72,7 @@ func TestControllerNamesDeclaration(t *testing.T) {
		names.CertificateSigningRequestSigningController,
		names.CertificateSigningRequestApprovingController,
		names.CertificateSigningRequestCleanerController,
		names.PodCertificateRequestCleanerController,
		names.TTLController,
		names.BootstrapSignerController,
		names.TokenCleanerController,
@@ -63,6 +63,7 @@ const (
	CertificateSigningRequestSigningController   = "certificatesigningrequest-signing-controller"
	CertificateSigningRequestApprovingController = "certificatesigningrequest-approving-controller"
	CertificateSigningRequestCleanerController   = "certificatesigningrequest-cleaner-controller"
	PodCertificateRequestCleanerController       = "podcertificaterequest-cleaner-controller"
	TTLController                                = "ttl-controller"
	BootstrapSignerController                    = "bootstrap-signer-controller"
	TokenCleanerController                       = "token-cleaner-controller"
@@ -683,6 +683,7 @@ func dropDisabledFields(
	dropDisabledMatchLabelKeysFieldInPodAffinity(podSpec, oldPodSpec)
	dropDisabledDynamicResourceAllocationFields(podSpec, oldPodSpec)
	dropDisabledClusterTrustBundleProjection(podSpec, oldPodSpec)
	dropDisabledPodCertificateProjection(podSpec, oldPodSpec)

	if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) && !inPlacePodVerticalScalingInUse(oldPodSpec) {
		// Drop ResizePolicy fields. Don't drop updates to Resources field as template.spec.resources
@@ -1309,6 +1310,49 @@ func dropDisabledClusterTrustBundleProjection(podSpec, oldPodSpec *api.PodSpec)
	}
}

func podCertificateProjectionInUse(podSpec *api.PodSpec) bool {
	if podSpec == nil {
		return false
	}
	for _, v := range podSpec.Volumes {
		if v.Projected == nil {
			continue
		}

		for _, s := range v.Projected.Sources {
			if s.PodCertificate != nil {
				return true
			}
		}
	}

	return false
}

func dropDisabledPodCertificateProjection(podSpec, oldPodSpec *api.PodSpec) {
	if utilfeature.DefaultFeatureGate.Enabled(features.PodCertificateRequest) {
		return
	}
	if podSpec == nil {
		return
	}

	// If the pod was already using it, it can keep using it.
	if podCertificateProjectionInUse(oldPodSpec) {
		return
	}

	for i := range podSpec.Volumes {
		if podSpec.Volumes[i].Projected == nil {
			continue
		}

		for j := range podSpec.Volumes[i].Projected.Sources {
			podSpec.Volumes[i].Projected.Sources[j].PodCertificate = nil
		}
	}
}

func hasInvalidLabelValueInAffinitySelector(spec *api.PodSpec) bool {
	if spec.Affinity != nil {
		if spec.Affinity.PodAffinity != nil {
@@ -1517,6 +1561,8 @@ func HasAPIObjectReference(pod *api.Pod) (bool, string, error) {
			return true, "serviceaccounts (via projected volumes)", nil
		case s.ClusterTrustBundle != nil:
			return true, "clustertrustbundles", nil
		case s.PodCertificate != nil:
			return true, "podcertificates", nil
		case s.DownwardAPI != nil:
			// Allow projected volume sources that don't require the Kubernetes API
			continue
@@ -3378,6 +3378,159 @@ func TestDropClusterTrustBundleProjectedVolumes(t *testing.T) {
	}
}

func TestDropPodCertificateProjectedVolumes(t *testing.T) {
	testCases := []struct {
		description                     string
		podCertificateProjectionEnabled bool
		oldPod                          *api.PodSpec
		newPod                          *api.PodSpec
		wantPod                         *api.PodSpec
	}{
		{
			description: "feature gate disabled, cannot add volume to pod",
			oldPod: &api.PodSpec{
				Volumes: []api.Volume{},
			},
			newPod: &api.PodSpec{
				Volumes: []api.Volume{
					{
						Name: "foo",
						VolumeSource: api.VolumeSource{
							Projected: &api.ProjectedVolumeSource{
								Sources: []api.VolumeProjection{
									{
										PodCertificate: &api.PodCertificateProjection{
											SignerName: "foo.example.com/bar",
										},
									},
								},
							}},
					},
				},
			},
			wantPod: &api.PodSpec{
				Volumes: []api.Volume{
					{
						Name: "foo",
						VolumeSource: api.VolumeSource{
							Projected: &api.ProjectedVolumeSource{
								Sources: []api.VolumeProjection{
									{},
								},
							}},
					},
				},
			},
		},
		{
			description: "feature gate disabled, can keep volume on pod",
			oldPod: &api.PodSpec{
				Volumes: []api.Volume{
					{
						Name: "foo",
						VolumeSource: api.VolumeSource{
							Projected: &api.ProjectedVolumeSource{
								Sources: []api.VolumeProjection{
									{
										PodCertificate: &api.PodCertificateProjection{
											SignerName: "foo.example.com/bar",
										},
									},
								},
							}},
					},
				},
			},
			newPod: &api.PodSpec{
				Volumes: []api.Volume{
					{
						Name: "foo",
						VolumeSource: api.VolumeSource{
							Projected: &api.ProjectedVolumeSource{
								Sources: []api.VolumeProjection{
									{
										PodCertificate: &api.PodCertificateProjection{
											SignerName: "foo.example.com/bar",
										},
									},
								},
							}},
					},
				},
			},
			wantPod: &api.PodSpec{
				Volumes: []api.Volume{
					{
						Name: "foo",
						VolumeSource: api.VolumeSource{
							Projected: &api.ProjectedVolumeSource{
								Sources: []api.VolumeProjection{
									{
										PodCertificate: &api.PodCertificateProjection{
											SignerName: "foo.example.com/bar",
										},
									},
								},
							}},
					},
				},
			},
		},
		{
			description:                     "feature gate enabled, can add volume to pod",
			podCertificateProjectionEnabled: true,
			oldPod: &api.PodSpec{
				Volumes: []api.Volume{},
			},
			newPod: &api.PodSpec{
				Volumes: []api.Volume{
					{
						Name: "foo",
						VolumeSource: api.VolumeSource{
							Projected: &api.ProjectedVolumeSource{
								Sources: []api.VolumeProjection{
									{
										PodCertificate: &api.PodCertificateProjection{
											SignerName: "foo.example.com/bar",
										},
									},
								},
							}},
					},
				},
			},
			wantPod: &api.PodSpec{
				Volumes: []api.Volume{
					{
						Name: "foo",
						VolumeSource: api.VolumeSource{
							Projected: &api.ProjectedVolumeSource{
								Sources: []api.VolumeProjection{
									{
										PodCertificate: &api.PodCertificateProjection{
											SignerName: "foo.example.com/bar",
										},
									},
								},
							}},
					},
				},
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodCertificateRequest, tc.podCertificateProjectionEnabled)

			dropDisabledPodCertificateProjection(tc.newPod, tc.oldPod)
			if diff := cmp.Diff(tc.newPod, tc.wantPod); diff != "" {
				t.Fatalf("Unexpected modification to new pod; diff (-got +want)\n%s", diff)
			}
		})
	}
}

func TestDropPodLifecycleSleepAction(t *testing.T) {
	makeSleepHandler := func() *api.LifecycleHandler {
		return &api.LifecycleHandler{
@@ -4801,6 +4954,14 @@ func TestHasAPIReferences(t *testing.T) {
			expectRejection: true,
			resource:        "clustertrustbundles",
		},
		{
			name: "Non empty volume list with Projected volume with podcertificates",
			pod: &api.Pod{Spec: api.PodSpec{Volumes: []api.Volume{
				{Name: "test-volume-projected", VolumeSource: api.VolumeSource{Projected: &api.ProjectedVolumeSource{Sources: []api.VolumeProjection{{PodCertificate: &api.PodCertificateProjection{}}}}}},
			}}},
			expectRejection: true,
			resource:        "podcertificates",
		},
		{
			name: "Non empty volume list with Projected volume with secrets",
			pod: &api.Pod{Spec: api.PodSpec{Volumes: []api.Volume{
@@ -442,7 +442,6 @@ func warningsForOverlappingVirtualPaths(volumes []api.Volume) []string {
		}

		if v.Projected != nil {
			var sourcePaths []pathAndSource
			var allPaths []pathAndSource

			for _, source := range v.Projected.Sources {

@@ -451,6 +450,7 @@ func warningsForOverlappingVirtualPaths(volumes []api.Volume) []string {
					continue
				}

				var sourcePaths []pathAndSource
				switch {
				case source.ConfigMap != nil && source.ConfigMap.Items != nil:
					sourcePaths = extractPaths(source.ConfigMap.Items, fmt.Sprintf("ConfigMap %q", source.ConfigMap.Name))

@@ -468,6 +468,17 @@ func warningsForOverlappingVirtualPaths(volumes []api.Volume) []string {
						name = *source.ClusterTrustBundle.SignerName
					}
					sourcePaths = []pathAndSource{{source.ClusterTrustBundle.Path, fmt.Sprintf("ClusterTrustBundle %q", name)}}
				case source.PodCertificate != nil:
					sourcePaths = []pathAndSource{}
					if len(source.PodCertificate.CertificateChainPath) != 0 {
						sourcePaths = append(sourcePaths, pathAndSource{source.PodCertificate.CertificateChainPath, "PodCertificate chain"})
					}
					if len(source.PodCertificate.KeyPath) != 0 {
						sourcePaths = append(sourcePaths, pathAndSource{source.PodCertificate.KeyPath, "PodCertificate key"})
					}
					if len(source.PodCertificate.CredentialBundlePath) != 0 {
						sourcePaths = append(sourcePaths, pathAndSource{source.PodCertificate.CredentialBundlePath, "PodCertificate credential bundle"})
					}
				}

				if len(sourcePaths) == 0 {
@@ -19,9 +19,10 @@ package pod

import (
	"context"
	"reflect"
	"strings"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
@@ -595,6 +596,40 @@ func TestWarnings(t *testing.T) {
				`volume "foo" (Projected): overlapping paths: "test/test2" (DownwardAPI) with "test" (Secret "TestSecret")`,
			},
		},
		{
			name: "overlapping paths in projected volume - secret and pod certificate",
			template: &api.PodTemplateSpec{Spec: api.PodSpec{
				Volumes: []api.Volume{{
					Name: "foo",
					VolumeSource: api.VolumeSource{
						Projected: &api.ProjectedVolumeSource{
							Sources: []api.VolumeProjection{{
								Secret: &api.SecretProjection{
									LocalObjectReference: api.LocalObjectReference{Name: "TestSecret"},
									Items: []api.KeyToPath{
										{Key: "mykey", Path: "test"},
									},
								},
							}, {
								PodCertificate: &api.PodCertificateProjection{
									CredentialBundlePath: "test",
									KeyPath:              "test",
									CertificateChainPath: "test",
								},
							}},
						},
					},
				}},
			}},
			expected: []string{
				`volume "foo" (Projected): overlapping paths: "test" (PodCertificate credential bundle) with "test" (PodCertificate key)`,
				`volume "foo" (Projected): overlapping paths: "test" (PodCertificate credential bundle) with "test" (PodCertificate chain)`,
				`volume "foo" (Projected): overlapping paths: "test" (PodCertificate credential bundle) with "test" (Secret "TestSecret")`,
				`volume "foo" (Projected): overlapping paths: "test" (PodCertificate key) with "test" (PodCertificate chain)`,
				`volume "foo" (Projected): overlapping paths: "test" (PodCertificate key) with "test" (Secret "TestSecret")`,
				`volume "foo" (Projected): overlapping paths: "test" (PodCertificate chain) with "test" (Secret "TestSecret")`,
			},
		},
		{
			name: "overlapping paths in projected volume - downward api and cluster trust bundle api",
			template: &api.PodTemplateSpec{Spec: api.PodSpec{
@@ -1806,16 +1841,8 @@ func TestWarnings(t *testing.T) {
				oldTemplate = tc.oldTemplate
			}
			actual := GetWarningsForPodTemplate(context.TODO(), nil, tc.template, oldTemplate)
			if len(actual) != len(tc.expected) {
				t.Errorf("expected %d errors, got %d:\n%v", len(tc.expected), len(actual), strings.Join(actual, "\n"))
			}
			actualSet := sets.New(actual...)
			expectedSet := sets.New(tc.expected...)
			for _, missing := range sets.List(expectedSet.Difference(actualSet)) {
				t.Errorf("missing: %s", missing)
			}
			for _, extra := range sets.List(actualSet.Difference(expectedSet)) {
				t.Errorf("extra: %s", extra)
			if diff := cmp.Diff(actual, tc.expected, cmpopts.SortSlices(stringLess), cmpopts.EquateEmpty()); diff != "" {
				t.Errorf("bad warning output; diff (-got +want)\n%s", diff)
			}
		})

@@ -1829,21 +1856,17 @@ func TestWarnings(t *testing.T) {
				}
			}
			actual := GetWarningsForPod(context.TODO(), pod, &api.Pod{})
			if len(actual) != len(tc.expected) {
				t.Errorf("expected %d errors, got %d:\n%v", len(tc.expected), len(actual), strings.Join(actual, "\n"))
			}
			actualSet := sets.New(actual...)
			expectedSet := sets.New(tc.expected...)
			for _, missing := range sets.List(expectedSet.Difference(actualSet)) {
				t.Errorf("missing: %s", missing)
			}
			for _, extra := range sets.List(actualSet.Difference(expectedSet)) {
				t.Errorf("extra: %s", extra)
			if diff := cmp.Diff(actual, tc.expected, cmpopts.SortSlices(stringLess), cmpopts.EquateEmpty()); diff != "" {
				t.Errorf("bad warning output; diff (-got +want)\n%s", diff)
			}
		})
	}
}

func stringLess(a, b string) bool {
	return a < b
}

func TestTemplateOnlyWarnings(t *testing.T) {
	testcases := []struct {
		name string
@@ -93,6 +93,8 @@ func TestDefaulting(t *testing.T) {
		{Group: "batch", Version: "v2alpha1", Kind: "CronJob"}:                                     {},
		{Group: "batch", Version: "v2alpha1", Kind: "CronJobList"}:                                 {},
		{Group: "batch", Version: "v2alpha1", Kind: "JobTemplate"}:                                 {},
		{Group: "certificates.k8s.io", Version: "v1alpha1", Kind: "PodCertificateRequest"}:         {},
		{Group: "certificates.k8s.io", Version: "v1alpha1", Kind: "PodCertificateRequestList"}:     {},
		{Group: "certificates.k8s.io", Version: "v1beta1", Kind: "CertificateSigningRequest"}:      {},
		{Group: "certificates.k8s.io", Version: "v1beta1", Kind: "CertificateSigningRequestList"}:  {},
		{Group: "discovery.k8s.io", Version: "v1", Kind: "EndpointSlice"}:                          {},
@@ -25,6 +25,7 @@ import (
	"k8s.io/client-go/util/certificate/csr"
	"k8s.io/kubernetes/pkg/apis/certificates"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/utils/ptr"
)

// Funcs returns the fuzzer functions for the certificates api group.

@@ -42,5 +43,16 @@ var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
				obj.Status = api.ConditionTrue
			}
		},
		func(obj *certificates.PodCertificateRequestSpec, c randfill.Continue) {
			c.FillNoCustom(obj) // fuzz self without calling this function again

			// MaxExpirationSeconds has a field defaulter, so we should make
			// sure it's non-nil. Otherwise,
			// pkg/api/testing/serialization_test.go TestRoundTripTypes will
			// fail with diffs due to the defaulting.
			if obj.MaxExpirationSeconds == nil {
				obj.MaxExpirationSeconds = ptr.To[int32](86400)
			}
		},
	}
}
@@ -49,6 +49,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
		&CertificateSigningRequestList{},
		&ClusterTrustBundle{},
		&ClusterTrustBundleList{},
		&PodCertificateRequest{},
		&PodCertificateRequestList{},
	)
	return nil
}
@@ -18,6 +18,7 @@ package certificates

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	api "k8s.io/kubernetes/pkg/apis/core"
)
@@ -280,3 +281,217 @@ type ClusterTrustBundleList struct {

// MaxTrustBundleSize is the maximum size of a single trust bundle field.
const MaxTrustBundleSize = 1 * 1024 * 1024

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodCertificateRequest encodes a pod requesting a certificate from a given
// signer.
//
// Kubelets use this API to implement podCertificate projected volumes.
type PodCertificateRequest struct {
	metav1.TypeMeta
	// +optional
	metav1.ObjectMeta

	// Spec contains the details about the certificate being requested.
	Spec PodCertificateRequestSpec

	// Status contains the issued certificate, and a standard set of conditions.
	// +optional
	Status PodCertificateRequestStatus
}

// PodCertificateRequestSpec describes the certificate request. All fields are
// immutable after creation.
type PodCertificateRequestSpec struct {
	// SignerName indicates the requested signer.
	//
	// All signer names beginning with `kubernetes.io` are reserved for use by
	// the Kubernetes project. There is currently one well-known signer
	// documented by the Kubernetes project,
	// `kubernetes.io/kube-apiserver-client-pod`, which will issue client
	// certificates understood by kube-apiserver. It is currently
	// unimplemented.
	SignerName string

	// PodName is the name of the pod into which the certificate will be mounted.
	PodName string
	// PodUID is the UID of the pod into which the certificate will be mounted.
	PodUID types.UID

	// ServiceAccountName is the name of the service account the pod is running as.
	ServiceAccountName string
	// ServiceAccountUID is the UID of the service account the pod is running as.
	ServiceAccountUID types.UID

	// NodeName is the name of the node the pod is assigned to.
	NodeName types.NodeName
	// NodeUID is the UID of the node the pod is assigned to.
	NodeUID types.UID

	// maxExpirationSeconds is the maximum lifetime permitted for the
	// certificate.
	//
	// If omitted, kube-apiserver will set it to 86400 (24 hours).
	// kube-apiserver will reject values shorter than 3600 (1 hour).
	//
	// The signer implementation is then free to issue a certificate with any
	// lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
	// seconds (1 hour). This constraint is enforced by kube-apiserver.
	MaxExpirationSeconds *int32

	// pkixPublicKey is the PKIX-serialized public key the signer will issue the
	// certificate to.
	//
	// The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521,
	// or ED25519. Note that this list may be expanded in the future.
	//
	// Signer implementations do not need to support all key types supported by
	// kube-apiserver and kubelet. If a signer does not support the key type
	// used for a given PodCertificateRequest, it must deny the request by
	// setting a status.conditions entry with a type of "Denied" and a reason of
	// "UnsupportedKeyType". It may also suggest a key type that it does support
	// in the message field.
	PKIXPublicKey []byte

	// proofOfPossession proves that the requesting kubelet holds the private
	// key corresponding to pkixPublicKey.
	//
	// It is constructed by signing the ASCII bytes of the pod's UID with the
	// private key corresponding to `PKIXPublicKey`.
	//
	// kube-apiserver validates the proof of possession during creation of the
	// PodCertificateRequest.
	//
	// If the key is an RSA key, then the signature is over the ASCII bytes of
	// the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang
	// function crypto/rsa.SignPSS with nil options).
	//
	// If the key is an ECDSA key, then the signature is as described by [SEC 1,
	// Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the
	// golang library function crypto/ecdsa.SignASN1).
	//
	// If the key is an ED25519 key, then the signature is as described by the
	// [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by the
	// golang library crypto/ed25519.Sign).
	ProofOfPossession []byte
}
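For illustration, here is a minimal sketch (not part of this commit) of how a kubelet-side caller might produce the pkixPublicKey and proofOfPossession fields for an Ed25519 key, following the comment above; the pod UID value is hypothetical:

	package main

	import (
		"crypto/ed25519"
		"crypto/rand"
		"crypto/x509"
		"fmt"
	)

	func main() {
		// Generate the pod's keypair (kubelet does this per projection).
		pub, priv, err := ed25519.GenerateKey(rand.Reader)
		if err != nil {
			panic(err)
		}

		// pkixPublicKey is the PKIX (DER) serialization of the public key.
		pkixPublicKey, err := x509.MarshalPKIXPublicKey(pub)
		if err != nil {
			panic(err)
		}

		// For Ed25519, the proof of possession signs the ASCII bytes of the
		// pod UID directly (no pre-hashing), matching the ed25519.Verify call
		// in the validation code added by this commit.
		podUID := "0b43b251-7f6b-46c8-a690-2523a11cbc8c" // hypothetical pod UID
		proofOfPossession := ed25519.Sign(priv, []byte(podUID))

		fmt.Printf("pkixPublicKey: %d bytes, proofOfPossession: %d bytes\n",
			len(pkixPublicKey), len(proofOfPossession))
	}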
type PodCertificateRequestStatus struct {
	// conditions applied to the request. Known conditions are "Denied",
	// "Failed", and "Issued".
	//
	// The types "Issued", "Denied", and "Failed" have special handling. At
	// most one of these conditions may be present, and they must have status
	// "True".
	//
	// If the request is denied with `Reason=UnsupportedKeyType`, the signer may
	// suggest a key type that will work in the message field.
	//
	// +listType=map
	// +listMapKey=type
	// +optional
	Conditions []metav1.Condition

	// certificateChain is populated with an issued certificate by the signer.
	// This field is set via the /status subresource. Once populated, this field
	// is immutable.
	//
	// If the certificate signing request is denied, a condition of type
	// "Denied" is added and this field remains empty. If the signer cannot
	// issue the certificate, a condition of type "Failed" is added and this
	// field remains empty.
	//
	// Validation requirements:
	//  1. certificateChain must consist of one or more PEM-formatted certificates.
	//  2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as
	//     described in section 4 of RFC5280.
	//
	// If more than one block is present, and the definition of the requested
	// spec.signerName does not indicate otherwise, the first block is the
	// issued certificate, and subsequent blocks should be treated as
	// intermediate certificates and presented in TLS handshakes. When
	// projecting the chain into a pod volume, kubelet will drop any data
	// in-between the PEM blocks, as well as any PEM block headers.
	//
	// +optional
	CertificateChain string

	// notBefore is the time at which the certificate becomes valid. This field
	// is set via the /status subresource. Once populated, it is immutable.
	// The signer must set this field at the same time it sets certificateChain.
	//
	// +optional
	NotBefore *metav1.Time

	// beginRefreshAt is the time at which the kubelet should begin trying to
	// refresh the certificate. This field is set via the /status subresource,
	// and must be set at the same time as certificateChain. Once populated,
	// this field is immutable.
	//
	// This field is only a hint. Kubelet may start refreshing before or after
	// this time if necessary.
	//
	// +optional
	BeginRefreshAt *metav1.Time

	// notAfter is the time at which the certificate expires. This field is set
	// via the /status subresource. Once populated, it is immutable. The
	// signer must set this field at the same time it sets certificateChain.
	//
	// +optional
	NotAfter *metav1.Time
}
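To make the status contract concrete, here is a hedged sketch of how a signer might populate these fields when issuing. The generated v1alpha1 clientset, the external struct field names, and the helper itself are assumptions (the informer usage elsewhere in this commit implies the generated client exists); beginRefreshAt is placed mid-lifetime, inside the [notBefore+10min, notAfter-10min] window that the validation code added below enforces:

	package signer

	import (
		"context"
		"crypto/x509"

		certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
	)

	// issue fills in status the way the validation below requires: timestamps
	// copied from the leaf certificate, refresh hint mid-lifetime, and the
	// update sent through the /status subresource.
	func issue(ctx context.Context, client kubernetes.Interface, pcr *certsv1alpha1.PodCertificateRequest, leaf *x509.Certificate, chainPEM string) error {
		pcr.Status.Conditions = []metav1.Condition{{
			Type:               "Issued",
			Status:             metav1.ConditionTrue,
			Reason:             "Issued",
			Message:            "issued by example signer", // hypothetical message
			LastTransitionTime: metav1.Now(),
		}}
		pcr.Status.CertificateChain = chainPEM
		pcr.Status.NotBefore = &metav1.Time{Time: leaf.NotBefore}
		pcr.Status.NotAfter = &metav1.Time{Time: leaf.NotAfter}
		// Begin refreshing roughly halfway through the lifetime.
		half := leaf.NotAfter.Sub(leaf.NotBefore) / 2
		pcr.Status.BeginRefreshAt = &metav1.Time{Time: leaf.NotBefore.Add(half)}

		_, err := client.CertificatesV1alpha1().PodCertificateRequests(pcr.Namespace).UpdateStatus(ctx, pcr, metav1.UpdateOptions{})
		return err
	}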
// Well-known condition types for PodCertificateRequests
const (
	// Denied indicates the request was denied by the signer.
	PodCertificateRequestConditionTypeDenied string = "Denied"
	// Failed indicates the signer failed to issue the certificate.
	PodCertificateRequestConditionTypeFailed string = "Failed"
	// Issued indicates the certificate has been issued.
	PodCertificateRequestConditionTypeIssued string = "Issued"
)

// Well-known condition reasons for PodCertificateRequests
const (
	// UnsupportedKeyType should be set on "Denied" conditions when the signer
	// doesn't support the key type of publicKey.
	PodCertificateRequestConditionUnsupportedKeyType string = "UnsupportedKeyType"
)

const (
	// MaxPKIXPublicKeySize is the maximum size permitted for the
	// PKIXPublicKey field. Size is chosen based on the size of an RSA 4096 key
	// plus some margin.
	MaxPKIXPublicKeySize = 10 * 1024
	// MaxProofOfPossessionSize is the maximum size permitted for the
	// ProofOfPossession field.
	MaxProofOfPossessionSize = 10 * 1024
	// MaxCertificateChainSize is the maximum size permitted for the
	// CertificateChain field.
	//
	// Size should be more than sufficient to store 10 RSA 4096 certificates,
	// each with a bunch of embedded extensions.
	MaxCertificateChainSize = 100 * 1024
	// MinMaxExpirationSeconds is the minimum value permitted for the MaxExpirationSeconds field.
	MinMaxExpirationSeconds = 60 * 60 // 3600 (1 hour)
	// MaxMaxExpirationSeconds is the maximum value permitted for the
	// MaxExpirationSeconds field for non-Kubernetes signers.
	MaxMaxExpirationSeconds = 91 * 24 * 60 * 60 // 7862400 (91 days)
	// KubernetesMaxMaxExpirationSeconds is the maximum value permitted for the
	// MaxExpirationSeconds field for Kubernetes signers.
	KubernetesMaxMaxExpirationSeconds = 24 * 60 * 60 // 86400 (24 hours)
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodCertificateRequestList is a collection of PodCertificateRequest objects.
type PodCertificateRequestList struct {
	metav1.TypeMeta
	// +optional
	metav1.ListMeta

	// Items is a collection of PodCertificateRequest objects
	Items []PodCertificateRequest
}
@@ -23,7 +23,7 @@ import (
)

func addConversionFuncs(scheme *runtime.Scheme) error {
	return scheme.AddFieldLabelConversionFunc(
	err := scheme.AddFieldLabelConversionFunc(
		SchemeGroupVersion.WithKind("ClusterTrustBundle"),
		func(label, value string) (string, string, error) {
			switch label {

@@ -34,4 +34,24 @@ func addConversionFuncs(scheme *runtime.Scheme) error {
			}
		},
	)
	if err != nil {
		return fmt.Errorf("while adding ClusterTrustBundle field label conversion func: %w", err)
	}

	err = scheme.AddFieldLabelConversionFunc(
		SchemeGroupVersion.WithKind("PodCertificateRequest"),
		func(label, value string) (string, string, error) {
			switch label {
			case "metadata.name", "spec.signerName", "spec.podName", "spec.nodeName":
				return label, value, nil
			default:
				return "", "", fmt.Errorf("field label not supported: %s", label)
			}
		},
	)
	if err != nil {
		return fmt.Errorf("while adding PodCertificateRequest field label conversion func: %w", err)
	}

	return nil
}
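As a usage sketch, the field labels registered above allow clients to filter PodCertificateRequests server-side. This example assumes the generated clientset exposes CertificatesV1alpha1().PodCertificateRequests() (implied by this commit's informer usage) and uses a hypothetical node name and the "default" namespace:

	package main

	import (
		"context"
		"fmt"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
		"k8s.io/client-go/tools/clientcmd"
	)

	func main() {
		config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
		if err != nil {
			panic(err)
		}
		client := kubernetes.NewForConfigOrDie(config)

		// spec.nodeName works as a field selector because of the conversion
		// func registered above. "node-a" is a hypothetical node name.
		pcrs, err := client.CertificatesV1alpha1().PodCertificateRequests("default").List(
			context.TODO(),
			metav1.ListOptions{FieldSelector: "spec.nodeName=node-a"},
		)
		if err != nil {
			panic(err)
		}
		for _, pcr := range pcrs.Items {
			fmt.Println(pcr.Name, pcr.Spec.SignerName)
		}
	}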
@@ -16,7 +16,9 @@ limitations under the License.

package v1alpha1

import "k8s.io/apimachinery/pkg/runtime"
import (
	"k8s.io/apimachinery/pkg/runtime"
)

func addDefaultingFuncs(scheme *runtime.Scheme) error {
	return RegisterDefaults(scheme)
@@ -18,18 +18,30 @@ package validation

import (
	"bytes"
	"crypto"
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/elliptic"
	"crypto/rsa"
	"crypto/sha256"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"strconv"
	"time"

	v1 "k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
	"k8s.io/apimachinery/pkg/util/diff"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/validation/field"
	utilcert "k8s.io/client-go/util/cert"
	"k8s.io/kubernetes/pkg/apis/certificates"
	apivalidation "k8s.io/kubernetes/pkg/apis/core/validation"
	"k8s.io/utils/clock"
)

var (
@@ -572,3 +584,373 @@ func validateTrustBundle(path *field.Path, in string) field.ErrorList {

	return allErrors
}

// ValidatePodCertificateRequestCreate runs all validation checks on a pod certificate request create.
func ValidatePodCertificateRequestCreate(req *certificates.PodCertificateRequest) field.ErrorList {
	var allErrors field.ErrorList

	metaErrors := apivalidation.ValidateObjectMeta(&req.ObjectMeta, true, apimachineryvalidation.NameIsDNSSubdomain, field.NewPath("metadata"))
	allErrors = append(allErrors, metaErrors...)

	signerNameErrors := apivalidation.ValidateSignerName(field.NewPath("spec", "signerName"), req.Spec.SignerName)
	allErrors = append(allErrors, signerNameErrors...)

	for _, msg := range apivalidation.ValidatePodName(req.Spec.PodName, false) {
		allErrors = append(allErrors, field.Invalid(field.NewPath("spec", "podName"), req.Spec.PodName, msg))
	}
	if len(req.Spec.PodUID) == 0 {
		allErrors = append(allErrors, field.Invalid(field.NewPath("spec", "podUID"), req.Spec.PodUID, "must not be empty"))
	}
	if len(req.Spec.PodUID) > 128 {
		allErrors = append(allErrors, field.TooLong(field.NewPath("spec", "podUID"), req.Spec.PodUID, 128))
	}
	for _, msg := range apivalidation.ValidateServiceAccountName(req.Spec.ServiceAccountName, false) {
		allErrors = append(allErrors, field.Invalid(field.NewPath("spec", "serviceAccountName"), req.Spec.ServiceAccountName, msg))
	}
	if len(req.Spec.ServiceAccountUID) == 0 {
		allErrors = append(allErrors, field.Invalid(field.NewPath("spec", "serviceAccountUID"), req.Spec.ServiceAccountUID, "must not be empty"))
	}
	if len(req.Spec.ServiceAccountUID) > 128 {
		allErrors = append(allErrors, field.TooLong(field.NewPath("spec", "serviceAccountUID"), req.Spec.ServiceAccountUID, 128))
	}
	for _, msg := range apivalidation.ValidateNodeName(string(req.Spec.NodeName), false) {
		allErrors = append(allErrors, field.Invalid(field.NewPath("spec", "nodeName"), req.Spec.NodeName, msg))
	}
	if len(req.Spec.NodeUID) == 0 {
		allErrors = append(allErrors, field.Invalid(field.NewPath("spec", "nodeUID"), req.Spec.NodeUID, "must not be empty"))
	}
	if len(req.Spec.NodeUID) > 128 {
		allErrors = append(allErrors, field.TooLong(field.NewPath("spec", "nodeUID"), req.Spec.NodeUID, 128))
	}

	if req.Spec.MaxExpirationSeconds == nil {
		allErrors = append(allErrors, field.Required(field.NewPath("spec", "maxExpirationSeconds"), "must be set"))
		return allErrors
	}
	if apivalidation.IsKubernetesSignerName(req.Spec.SignerName) {
		// Kubernetes signers are restricted to max 24 hour certs
		if !(certificates.MinMaxExpirationSeconds <= *req.Spec.MaxExpirationSeconds && *req.Spec.MaxExpirationSeconds <= certificates.KubernetesMaxMaxExpirationSeconds) {
			allErrors = append(allErrors, field.Invalid(field.NewPath("spec", "maxExpirationSeconds"), req.Spec.MaxExpirationSeconds, fmt.Sprintf("must be in the range [%d, %d]", certificates.MinMaxExpirationSeconds, certificates.KubernetesMaxMaxExpirationSeconds)))
		}
	} else {
		// All other signers are restricted to max 91 day certs.
		if !(certificates.MinMaxExpirationSeconds <= *req.Spec.MaxExpirationSeconds && *req.Spec.MaxExpirationSeconds <= certificates.MaxMaxExpirationSeconds) {
			allErrors = append(allErrors, field.Invalid(field.NewPath("spec", "maxExpirationSeconds"), req.Spec.MaxExpirationSeconds, fmt.Sprintf("must be in the range [%d, %d]", certificates.MinMaxExpirationSeconds, certificates.MaxMaxExpirationSeconds)))
		}
	}

	if len(req.Spec.PKIXPublicKey) > certificates.MaxPKIXPublicKeySize {
		allErrors = append(allErrors, field.TooLong(field.NewPath("spec", "pkixPublicKey"), req.Spec.PKIXPublicKey, certificates.MaxPKIXPublicKeySize))
		return allErrors
	}

	if len(req.Spec.ProofOfPossession) > certificates.MaxProofOfPossessionSize {
		allErrors = append(allErrors, field.TooLong(field.NewPath("spec", "proofOfPossession"), req.Spec.ProofOfPossession, certificates.MaxProofOfPossessionSize))
		return allErrors
	}

	pubAny, err := x509.ParsePKIXPublicKey(req.Spec.PKIXPublicKey)
	if err != nil {
		allErrors = append(allErrors, field.Invalid(pkixPath, req.Spec.PKIXPublicKey, "must be a valid PKIX-serialized public key"))
		return allErrors
	}

	// Verify public key properties and the proof-of-possession signature.
	switch pub := pubAny.(type) {
	case ed25519.PublicKey:
		// ed25519 has no key configuration to check
		if !ed25519.Verify(pub, []byte(req.Spec.PodUID), req.Spec.ProofOfPossession) {
			allErrors = append(allErrors, field.Invalid(popPath, field.OmitValueType{}, "could not verify proof-of-possession signature"))
			return allErrors
		}

	case *ecdsa.PublicKey:
		if pub.Curve != elliptic.P256() && pub.Curve != elliptic.P384() && pub.Curve != elliptic.P521() {
			allErrors = append(allErrors, field.Invalid(pkixPath, "curve "+pub.Curve.Params().Name, "elliptic public keys must use curve P256, P384, or P521"))
			return allErrors
		}
		if !ecdsa.VerifyASN1(pub, hashBytes([]byte(req.Spec.PodUID)), req.Spec.ProofOfPossession) {
			allErrors = append(allErrors, field.Invalid(popPath, field.OmitValueType{}, "could not verify proof-of-possession signature"))
			return allErrors
		}

	case *rsa.PublicKey:
		if pub.Size()*8 != 3072 && pub.Size()*8 != 4096 {
			allErrors = append(allErrors, field.Invalid(pkixPath, fmt.Sprintf("%d-bit modulus", pub.Size()*8), "RSA keys must have modulus size 3072 or 4096"))
			return allErrors
		}
		if err := rsa.VerifyPSS(pub, crypto.SHA256, hashBytes([]byte(req.Spec.PodUID)), req.Spec.ProofOfPossession, nil); err != nil {
			allErrors = append(allErrors, field.Invalid(popPath, field.OmitValueType{}, "could not verify proof-of-possession signature"))
			return allErrors
		}

	default:
		allErrors = append(allErrors, field.Invalid(pkixPath, req.Spec.PKIXPublicKey, "unknown public key type; supported types are Ed25519, ECDSA, and RSA"))
		return allErrors
	}

	return allErrors
}

func hashBytes(in []byte) []byte {
	out := sha256.Sum256(in)
	return out[:]
}
var (
	pkixPath         = field.NewPath("spec", "pkixPublicKey")
	popPath          = field.NewPath("spec", "proofOfPossession")
	certChainPath    = field.NewPath("status", "certificateChain")
	notBeforePath    = field.NewPath("status", "notBefore")
	notAfterPath     = field.NewPath("status", "notAfter")
	beginRefreshPath = field.NewPath("status", "beginRefreshAt")
)

// ValidatePodCertificateRequestUpdate runs all update validation checks on a
// non-status update.
//
// All spec fields are immutable after creation, and status updates must go
// through the dedicated status update verb, so only metadata updates are
// allowed.
func ValidatePodCertificateRequestUpdate(newReq, oldReq *certificates.PodCertificateRequest) field.ErrorList {
	var allErrors field.ErrorList
	allErrors = append(allErrors, apivalidation.ValidateObjectMetaUpdate(&newReq.ObjectMeta, &oldReq.ObjectMeta, field.NewPath("metadata"))...)

	// All spec fields are immutable.
	allErrors = append(allErrors, apivalidation.ValidateImmutableField(newReq.Spec, oldReq.Spec, field.NewPath("spec"))...)

	return allErrors
}

// ValidatePodCertificateRequestStatusUpdate validates a status update for a
// PodCertificateRequest.
func ValidatePodCertificateRequestStatusUpdate(newReq, oldReq *certificates.PodCertificateRequest, clock clock.PassiveClock) field.ErrorList {
	var allErrors field.ErrorList

	// Metadata is *mostly* immutable... ManagedFields is allowed to change. We
	// are reliant on the strategy that's calling us to have patched
	// newReq.ObjectMeta using metav1.ResetObjectMetaForStatus.
	allErrors = append(allErrors, apivalidation.ValidateObjectMetaUpdate(&newReq.ObjectMeta, &oldReq.ObjectMeta, field.NewPath("metadata"))...)
	if len(allErrors) > 0 {
		return allErrors
	}

	// Don't validate spec. Strategy has stomped it.

	// There can be at most one of the known conditions, and it must have status "True".
	numKnownConditions := 0
	for i, cond := range newReq.Status.Conditions {
		switch cond.Type {
		case certificates.PodCertificateRequestConditionTypeIssued, certificates.PodCertificateRequestConditionTypeDenied, certificates.PodCertificateRequestConditionTypeFailed:
			numKnownConditions++
			if numKnownConditions > 1 {
				allErrors = append(allErrors, field.Invalid(field.NewPath("status", "conditions", formatIndex(i), "type"), cond.Type, `There may be at most one condition with type "Issued", "Denied", or "Failed"`))
			}
			if cond.Status != metav1.ConditionTrue {
				allErrors = append(allErrors, field.NotSupported(field.NewPath("status", "conditions", formatIndex(i), "status"), cond.Status, []metav1.ConditionStatus{metav1.ConditionTrue}))
			}
		default:
			allErrors = append(allErrors, field.NotSupported(field.NewPath("status", "conditions", formatIndex(i), "type"), cond.Type, []string{certificates.PodCertificateRequestConditionTypeIssued, certificates.PodCertificateRequestConditionTypeDenied, certificates.PodCertificateRequestConditionTypeFailed}))
		}
	}

	allErrors = append(allErrors, metav1validation.ValidateConditions(newReq.Status.Conditions, field.NewPath("status", "conditions"))...)

	// Bail if something seems wrong with the conditions --- we use the
	// conditions to drive validation of the remainder of the status fields.
	if len(allErrors) > 0 {
		return allErrors
	}

	// Is the original PCR in a terminal condition? If so, the entire status
	// field (including conditions) is immutable. No more changes are
	// permitted.
	if pcrIsIssued(oldReq) || pcrIsDenied(oldReq) || pcrIsFailed(oldReq) {
		allErrors = append(allErrors, validateSemanticEquality(newReq.Status, oldReq.Status, field.NewPath("status"), "immutable after PodCertificateRequest is issued, denied, or failed")...)
		return allErrors
	}

	// Are we transitioning to the "denied" or "failed" terminal conditions?
	if pcrIsDenied(newReq) || pcrIsFailed(newReq) {
		// No other status fields may change besides conditions.
		wantStatus := certificates.PodCertificateRequestStatus{
			Conditions: newReq.Status.Conditions,
		}
		allErrors = append(allErrors, validateSemanticEquality(newReq.Status, wantStatus, field.NewPath("status"), "non-condition status fields must be empty when denying or failing the PodCertificateRequest")...)
		return allErrors
	}

	// Are we transitioning to the "issued" terminal condition?
	if pcrIsIssued(newReq) {
		if len(newReq.Status.CertificateChain) > certificates.MaxCertificateChainSize {
			allErrors = append(allErrors, field.TooLong(field.NewPath("status", "certificateChain"), newReq.Status.CertificateChain, certificates.MaxCertificateChainSize))
			return allErrors
		}

		leafBlock, rest := pem.Decode([]byte(newReq.Status.CertificateChain))
		if leafBlock == nil {
			allErrors = append(allErrors, field.Invalid(certChainPath, newReq.Status.CertificateChain, "issued certificate chain must contain at least one certificate"))
			return allErrors
		}
		if leafBlock.Type != "CERTIFICATE" {
			allErrors = append(allErrors, field.Invalid(certChainPath, newReq.Status.CertificateChain, "issued certificate chain must consist entirely of CERTIFICATE PEM blocks"))
			return allErrors
		}

		leafCert, err := x509.ParseCertificate(leafBlock.Bytes)
		if err != nil {
			allErrors = append(allErrors, field.Invalid(certChainPath, newReq.Status.CertificateChain, "leaf certificate does not parse as valid X.509"))
			return allErrors
		}

		// Was the certificate issued to the public key in the spec?
		wantPKAny, err := x509.ParsePKIXPublicKey(oldReq.Spec.PKIXPublicKey)
		if err != nil {
			allErrors = append(allErrors, field.Invalid(pkixPath, oldReq.Spec.PKIXPublicKey, "must be a valid PKIX-serialized public key"))
			return allErrors
		}
		switch wantPK := wantPKAny.(type) {
		case ed25519.PublicKey:
			if !wantPK.Equal(leafCert.PublicKey) {
				allErrors = append(allErrors, field.Invalid(certChainPath, newReq.Status.CertificateChain, "leaf certificate was not issued to the requested public key"))
				return allErrors
			}
		case *rsa.PublicKey:
			if !wantPK.Equal(leafCert.PublicKey) {
				allErrors = append(allErrors, field.Invalid(certChainPath, newReq.Status.CertificateChain, "leaf certificate was not issued to the requested public key"))
				return allErrors
			}
		case *ecdsa.PublicKey:
			if !wantPK.Equal(leafCert.PublicKey) {
				allErrors = append(allErrors, field.Invalid(certChainPath, newReq.Status.CertificateChain, "leaf certificate was not issued to the requested public key"))
				return allErrors
			}
		}

		// All timestamps must be set.
		if newReq.Status.NotBefore == nil {
			allErrors = append(allErrors, field.Required(notBeforePath, "must be present and consistent with the issued certificate"))
		}
		if newReq.Status.NotAfter == nil {
			allErrors = append(allErrors, field.Required(notAfterPath, "must be present and consistent with the issued certificate"))
		}
		if newReq.Status.BeginRefreshAt == nil {
			allErrors = append(allErrors, field.Required(beginRefreshPath, "must be present and in the range [notbefore+10min, notafter-10min]"))
		}
		if len(allErrors) > 0 {
			return allErrors
		}

		// Validate that NotBefore is consistent with the status field, and within 5
		// minutes of the current time.
		if !newReq.Status.NotBefore.Time.Equal(leafCert.NotBefore) {
			allErrors = append(allErrors, field.Invalid(notBeforePath, newReq.Status.NotBefore.Time, "must be set to the NotBefore time encoded in the leaf certificate"))
			return allErrors
		}
		if !timeNear(newReq.Status.NotBefore.Time, clock.Now(), 5*time.Minute) {
			allErrors = append(allErrors, field.Invalid(notBeforePath, newReq.Status.NotBefore.Time, "must be set to within 5 minutes of kube-apiserver's current time"))
			return allErrors
		}

		// Validate that NotAfter is consistent with the status field.
		if !newReq.Status.NotAfter.Time.Equal(leafCert.NotAfter) {
			allErrors = append(allErrors, field.Invalid(notAfterPath, newReq.Status.NotAfter.Time, "must be set to the NotAfter time encoded in the leaf certificate"))
			return allErrors
		}

		// Validate the leaf cert lifetime against minimum and maximum constraints.
		lifetime := leafCert.NotAfter.Sub(leafCert.NotBefore)
		if lifetime < 1*time.Hour {
			allErrors = append(allErrors, field.Invalid(certChainPath, lifetime, "leaf certificate lifetime must be >= 1 hour"))
			return allErrors
		}
		if lifetime > time.Duration(*newReq.Spec.MaxExpirationSeconds)*time.Second {
			allErrors = append(allErrors, field.Invalid(certChainPath, lifetime, fmt.Sprintf("leaf certificate lifetime must be <= spec.maxExpirationSeconds (%v)", *newReq.Spec.MaxExpirationSeconds)))
			return allErrors
		}

		// Validate that BeginRefreshAt is within limits.
		if newReq.Status.BeginRefreshAt.Time.Before(newReq.Status.NotBefore.Time.Add(10 * time.Minute)) {
			allErrors = append(allErrors, field.Invalid(beginRefreshPath, newReq.Status.BeginRefreshAt.Time, "must be at least 10 minutes after status.notBefore"))
			return allErrors
		}
		if newReq.Status.BeginRefreshAt.Time.After(newReq.Status.NotAfter.Time.Add(-10 * time.Minute)) {
			allErrors = append(allErrors, field.Invalid(beginRefreshPath, newReq.Status.BeginRefreshAt.Time, "must be at least 10 minutes before status.notAfter"))
			return allErrors
		}

		// Check the remainder of the certificates in the chain, if any. We cannot
		// easily verify the chain, because the Golang X.509 libraries are wisely
		// written to prevent us from doing stupid things like verifying a partial
		// chain, but we can at least check that they are valid certificates.
		for {
			var nextBlock *pem.Block
			nextBlock, rest = pem.Decode(rest)
			if nextBlock == nil {
				break
			}

			if nextBlock.Type != "CERTIFICATE" {
				allErrors = append(allErrors, field.Invalid(certChainPath, newReq.Status.CertificateChain, "issued certificate chain must consist entirely of CERTIFICATE PEM blocks"))
				return allErrors
			}

			_, err := x509.ParseCertificate(nextBlock.Bytes)
			if err != nil {
				allErrors = append(allErrors, field.Invalid(certChainPath, newReq.Status.CertificateChain, "intermediate certificate does not parse as valid X.509"))
				return allErrors
			}
		}

		return allErrors
	}

	// We are not transitioning to any terminal state. The whole status object
	// is immutable.
	allErrors = append(allErrors, validateSemanticEquality(newReq.Status, oldReq.Status, field.NewPath("status"), `status is immutable unless transitioning to "Issued", "Denied", or "Failed"`)...)
	return allErrors
}

func pcrIsIssued(pcr *certificates.PodCertificateRequest) bool {
	for _, cond := range pcr.Status.Conditions {
		if cond.Type == certificates.PodCertificateRequestConditionTypeIssued && cond.Status == metav1.ConditionTrue {
			return true
		}
	}
	return false
}

func pcrIsDenied(pcr *certificates.PodCertificateRequest) bool {
	for _, cond := range pcr.Status.Conditions {
		if cond.Type == certificates.PodCertificateRequestConditionTypeDenied && cond.Status == metav1.ConditionTrue {
			return true
		}
	}
	return false
}

func pcrIsFailed(pcr *certificates.PodCertificateRequest) bool {
	for _, cond := range pcr.Status.Conditions {
		if cond.Type == certificates.PodCertificateRequestConditionTypeFailed && cond.Status == metav1.ConditionTrue {
			return true
		}
	}
	return false
}

func formatIndex(i int) string {
	return "[" + strconv.Itoa(i) + "]"
}

// Similar to apivalidation.ValidateImmutableField but we can supply our own detail string.
func validateSemanticEquality(oldVal, newVal any, fldPath *field.Path, detail string) field.ErrorList {
	allErrs := field.ErrorList{}
	if !apiequality.Semantic.DeepEqual(oldVal, newVal) {
		allErrs = append(allErrs, field.Invalid(fldPath, field.OmitValueType{}, detail))
	}
	return allErrs
}

func timeNear(a, b time.Time, skew time.Duration) bool {
	return a.After(b.Add(-skew)) && a.Before(b.Add(skew))
}
(File diff suppressed because it is too large.)
@@ -1855,6 +1855,61 @@ type ClusterTrustBundleProjection struct {
	Path string
}

// PodCertificateProjection provides a private key and X.509 certificate in
// a combined file.
type PodCertificateProjection struct {
	// Kubelet's generated CSRs will be addressed to this signer.
	SignerName string

	// The type of keypair Kubelet will generate for the pod.
	//
	// Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
	// "ECDSAP521", and "ED25519".
	KeyType string

	// maxExpirationSeconds is the maximum lifetime permitted for the
	// certificate.
	//
	// Kubelet copies this value verbatim into the PodCertificateRequests it
	// generates for this projection.
	//
	// If omitted, kube-apiserver will set it to 86400 (24 hours).
	// kube-apiserver will reject values shorter than 3600 (1 hour). The
	// maximum allowable value is 7862400 (91 days).
	//
	// The signer implementation is then free to issue a certificate with any
	// lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
	// seconds (1 hour). This constraint is enforced by kube-apiserver.
	// `kubernetes.io` signers will never issue certificates with a lifetime
	// longer than 24 hours.
	MaxExpirationSeconds *int32

	// Write the credential bundle at this path in the projected volume.
	//
	// The credential bundle is a single file that contains multiple PEM blocks.
	// The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
	// key.
	//
	// The remaining blocks are CERTIFICATE blocks, containing the issued
	// certificate chain from the signer (leaf and any intermediates).
	//
	// Using credentialBundlePath lets your Pod's application code make a single
	// atomic read that retrieves a consistent key and certificate chain. If you
	// project them to separate files, your application code will need to
	// additionally check that the leaf certificate was issued to the key.
	CredentialBundlePath string

	// Write the key at this path in the projected volume.
	//
	// When using keyPath and certificateChainPath, your application needs to check
	// that the key and leaf certificate are consistent, because it is possible to
	// read the files mid-rotation.
	KeyPath string

	// Write the certificate chain at this path in the projected volume.
	CertificateChainPath string
}
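A sketch of how application code might consume the credential bundle with a single read, per the layout described above; the mount path is hypothetical:

	package main

	import (
		"crypto/x509"
		"encoding/pem"
		"fmt"
		"os"
	)

	func main() {
		// Hypothetical path: the volume's mountPath joined with credentialBundlePath.
		raw, err := os.ReadFile("/var/run/pod-certificate/credbundle.pem")
		if err != nil {
			panic(err)
		}

		// First block: the PKCS#8 private key.
		keyBlock, rest := pem.Decode(raw)
		if keyBlock == nil || keyBlock.Type != "PRIVATE KEY" {
			panic("expected a leading PRIVATE KEY block")
		}
		key, err := x509.ParsePKCS8PrivateKey(keyBlock.Bytes)
		if err != nil {
			panic(err)
		}

		// Remaining blocks: the certificate chain, leaf first.
		var chain []*x509.Certificate
		for {
			var block *pem.Block
			block, rest = pem.Decode(rest)
			if block == nil {
				break
			}
			cert, err := x509.ParseCertificate(block.Bytes)
			if err != nil {
				panic(err)
			}
			chain = append(chain, cert)
		}
		fmt.Printf("loaded %T key and %d certificates\n", key, len(chain))
	}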
// ProjectedVolumeSource represents a projected volume source
type ProjectedVolumeSource struct {
	// list of volume projections
@@ -1882,6 +1937,8 @@ type VolumeProjection struct {
	ServiceAccountToken *ServiceAccountTokenProjection
	// information about the ClusterTrustBundle data to project
	ClusterTrustBundle *ClusterTrustBundleProjection
	// information about the pod certificate to project.
	PodCertificate *PodCertificateProjection
}

// KeyToPath maps a string key to a path within a volume.
@@ -25,6 +25,12 @@ import (
	"k8s.io/apimachinery/pkg/util/validation/field"
)

// IsKubernetesSignerName checks if signerName is one reserved by the Kubernetes project.
func IsKubernetesSignerName(signerName string) bool {
	hostName, _, _ := strings.Cut(signerName, "/")
	return hostName == "kubernetes.io" || strings.HasSuffix(hostName, ".kubernetes.io")
}

// ValidateSignerName checks that signerName is syntactically valid.
//
// ensure signerName is of the form domain.com/something and up to 571 characters.
pkg/apis/core/validation/names_test.go (new file, 59 lines)
@@ -0,0 +1,59 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package validation

import "testing"

func TestIsKubernetesSignerName(t *testing.T) {
	testCases := []struct {
		name string
		want bool
	}{
		{
			name: "kubernetes.io",
			want: true,
		},
		{
			name: "kubernetes.io/a",
			want: true,
		},
		{
			name: "kubernetes.io/a/b.c/d.e",
			want: true,
		},
		{
			name: "foo.kubernetes.io",
			want: true,
		},
		{
			name: "fookubernetes.io",
			want: false,
		},
		{
			name: "foo.com/a",
			want: false,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			got := IsKubernetesSignerName(tc.name)
			if got != tc.want {
				t.Errorf("IsKubernetesSignerName(%q); got %v, want %v", tc.name, got, tc.want)
			}
		})
	}
}
@ -51,6 +51,7 @@ import (
	resourcehelper "k8s.io/component-helpers/resource"
	schedulinghelper "k8s.io/component-helpers/scheduling/corev1"
	kubeletapis "k8s.io/kubelet/pkg/apis"
	"k8s.io/kubernetes/pkg/apis/certificates"

	apiservice "k8s.io/kubernetes/pkg/api/service"
	"k8s.io/kubernetes/pkg/apis/core"
@ -1248,6 +1249,69 @@ func validateProjectionSources(projection *core.ProjectedVolumeSource, projectio
				allErrs = append(allErrs, field.Invalid(fldPath, curPath, "conflicting duplicate paths"))
			}
		}
		if projPath := srcPath.Child("podCertificate"); source.PodCertificate != nil {
			numSources++

			allErrs = append(allErrs, ValidateSignerName(projPath.Child("signerName"), source.PodCertificate.SignerName)...)

			switch source.PodCertificate.KeyType {
			case "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", "ECDSAP521", "ED25519":
				// ok
			default:
				allErrs = append(allErrs, field.NotSupported(projPath.Child("keyType"), source.PodCertificate.KeyType, []string{"RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", "ECDSAP521", "ED25519"}))
			}

			if source.PodCertificate.MaxExpirationSeconds != nil {
				if *source.PodCertificate.MaxExpirationSeconds < 3600 {
					allErrs = append(allErrs, field.Invalid(projPath.Child("maxExpirationSeconds"), *source.PodCertificate.MaxExpirationSeconds, "if provided, maxExpirationSeconds must be >= 3600"))
				}
				maxMaxExpirationSeconds := certificates.MaxMaxExpirationSeconds
				if IsKubernetesSignerName(source.PodCertificate.SignerName) {
					maxMaxExpirationSeconds = certificates.KubernetesMaxMaxExpirationSeconds
				}
				if *source.PodCertificate.MaxExpirationSeconds > int32(maxMaxExpirationSeconds) {
					allErrs = append(allErrs, field.Invalid(projPath.Child("maxExpirationSeconds"), *source.PodCertificate.MaxExpirationSeconds, fmt.Sprintf("if provided, maxExpirationSeconds must be <= %d", maxMaxExpirationSeconds)))
				}
			}

			numPaths := 0
			if len(source.PodCertificate.CredentialBundlePath) != 0 {
				numPaths++
				// The credential bundle path must be a valid local, non-reserved path.
				allErrs = append(allErrs, ValidateLocalNonReservedPath(source.PodCertificate.CredentialBundlePath, projPath.Child("credentialBundlePath"))...)
				// The credential bundle path must not collide with a path from another source.
				if !allPaths.Has(source.PodCertificate.CredentialBundlePath) {
					allPaths.Insert(source.PodCertificate.CredentialBundlePath)
				} else {
					allErrs = append(allErrs, field.Invalid(fldPath, source.PodCertificate.CredentialBundlePath, "conflicting duplicate paths"))
				}
			}

			if len(source.PodCertificate.KeyPath) != 0 {
				numPaths++
				allErrs = append(allErrs, ValidateLocalNonReservedPath(source.PodCertificate.KeyPath, projPath.Child("keyPath"))...)
				if !allPaths.Has(source.PodCertificate.KeyPath) {
					allPaths.Insert(source.PodCertificate.KeyPath)
				} else {
					allErrs = append(allErrs, field.Invalid(fldPath, source.PodCertificate.KeyPath, "conflicting duplicate paths"))
				}
			}

			if len(source.PodCertificate.CertificateChainPath) != 0 {
				numPaths++
				allErrs = append(allErrs, ValidateLocalNonReservedPath(source.PodCertificate.CertificateChainPath, projPath.Child("certificateChainPath"))...)
				if !allPaths.Has(source.PodCertificate.CertificateChainPath) {
					allPaths.Insert(source.PodCertificate.CertificateChainPath)
				} else {
					allErrs = append(allErrs, field.Invalid(fldPath, source.PodCertificate.CertificateChainPath, "conflicting duplicate paths"))
				}
			}

			if numPaths == 0 {
				allErrs = append(allErrs, field.Required(projPath, "specify at least one of credentialBundlePath, keyPath, and certificateChainPath"))
			}
		}
		if numSources > 1 {
			allErrs = append(allErrs, field.Forbidden(srcPath, "may not specify more than 1 volume type per source"))
		}
@ -10881,6 +10881,63 @@ func TestValidatePod(t *testing.T) {
			},
		}),
	),
	"valid PodCertificate projected volume source, minimal": *podtest.MakePod("valid-podcertificate-1",
		podtest.SetVolumes(core.Volume{
			Name: "projected-volume",
			VolumeSource: core.VolumeSource{
				Projected: &core.ProjectedVolumeSource{
					Sources: []core.VolumeProjection{
						{
							PodCertificate: &core.PodCertificateProjection{
								SignerName:           "example.com/foo",
								KeyType:              "ED25519",
								CredentialBundlePath: "credbundle.pem",
							},
						},
					},
				},
			},
		}),
	),
	"valid PodCertificate projected volume source, explicit max expiration": *podtest.MakePod("valid-podcertificate-3",
		podtest.SetVolumes(core.Volume{
			Name: "projected-volume",
			VolumeSource: core.VolumeSource{
				Projected: &core.ProjectedVolumeSource{
					Sources: []core.VolumeProjection{
						{
							PodCertificate: &core.PodCertificateProjection{
								SignerName:           "example.com/foo",
								KeyType:              "ED25519",
								MaxExpirationSeconds: ptr.To[int32](3600),
								CredentialBundlePath: "credbundle.pem",
							},
						},
					},
				},
			},
		}),
	),
	"valid PodCertificate projected volume source, separate key/cert": *podtest.MakePod("valid-podcertificate-4",
		podtest.SetVolumes(core.Volume{
			Name: "projected-volume",
			VolumeSource: core.VolumeSource{
				Projected: &core.ProjectedVolumeSource{
					Sources: []core.VolumeProjection{
						{
							PodCertificate: &core.PodCertificateProjection{
								SignerName:           "example.com/foo",
								KeyType:              "ED25519",
								MaxExpirationSeconds: ptr.To[int32](3600),
								KeyPath:              "key.pem",
								CertificateChainPath: "certificates.pem",
							},
						},
					},
				},
			},
		}),
	),
	"ephemeral volume + PVC, no conflict between them": *podtest.MakePod("valid-extended",
		podtest.SetVolumes(
			core.Volume{Name: "pvc", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "my-pvc"}}},
@ -12363,6 +12420,172 @@ func TestValidatePod(t *testing.T) {
			}),
		),
	},
	"PodCertificate projected volume with no signer name": {
		expectedError: "Required value",
		spec: *podtest.MakePod("pod1",
			podtest.SetVolumes(core.Volume{
				Name: "projected-volume",
				VolumeSource: core.VolumeSource{
					Projected: &core.ProjectedVolumeSource{
						Sources: []core.VolumeProjection{
							{
								PodCertificate: &core.PodCertificateProjection{
									KeyType:              "ED25519",
									CredentialBundlePath: "credbundle.pem",
								},
							},
						},
					},
				},
			}),
		),
	},
	"PodCertificate projected volume with bad signer name": {
		expectedError: "must be a fully qualified domain and path of the form",
		spec: *podtest.MakePod("pod1",
			podtest.SetVolumes(core.Volume{
				Name: "projected-volume",
				VolumeSource: core.VolumeSource{
					Projected: &core.ProjectedVolumeSource{
						Sources: []core.VolumeProjection{
							{
								PodCertificate: &core.PodCertificateProjection{
									SignerName:           "example.com/foo/invalid",
									KeyType:              "ED25519",
									CredentialBundlePath: "credbundle.pem",
								},
							},
						},
					},
				},
			}),
		),
	},
	"PodCertificate projected volume with bad key type": {
		expectedError: "Unsupported value: \"BAD\"",
		spec: *podtest.MakePod("pod1",
			podtest.SetVolumes(core.Volume{
				Name: "projected-volume",
				VolumeSource: core.VolumeSource{
					Projected: &core.ProjectedVolumeSource{
						Sources: []core.VolumeProjection{
							{
								PodCertificate: &core.PodCertificateProjection{
									SignerName:           "example.com/foo",
									KeyType:              "BAD",
									CredentialBundlePath: "credbundle.pem",
								},
							},
						},
					},
				},
			}),
		),
	},
	"PodCertificate projected volume with no paths": {
		expectedError: "Required value: specify at least one of credentialBundlePath, keyPath, and certificateChainPath",
		spec: *podtest.MakePod("pod1",
			podtest.SetVolumes(core.Volume{
				Name: "projected-volume",
				VolumeSource: core.VolumeSource{
					Projected: &core.ProjectedVolumeSource{
						Sources: []core.VolumeProjection{
							{
								PodCertificate: &core.PodCertificateProjection{
									SignerName: "example.com/foo",
									KeyType:    "ED25519",
								},
							},
						},
					},
				},
			}),
		),
	},
	"PodCertificate projected volume with conflicting paths": {
		expectedError: "conflicting duplicate paths",
		spec: *podtest.MakePod("pod1",
			podtest.SetVolumes(core.Volume{
				Name: "projected-volume",
				VolumeSource: core.VolumeSource{
					Projected: &core.ProjectedVolumeSource{
						Sources: []core.VolumeProjection{
							{
								PodCertificate: &core.PodCertificateProjection{
									SignerName:           "example.com/foo",
									KeyType:              "ED25519",
									KeyPath:              "same.pem",
									CertificateChainPath: "same.pem",
								},
							},
						},
					},
				},
			}),
		),
	},
	"PodCertificate projected volume with bad cred bundle path": {
		expectedError: "must be a relative path",
		spec: *podtest.MakePod("pod1",
			podtest.SetVolumes(core.Volume{
				Name: "projected-volume",
				VolumeSource: core.VolumeSource{
					Projected: &core.ProjectedVolumeSource{
						Sources: []core.VolumeProjection{
							{
								PodCertificate: &core.PodCertificateProjection{
									SignerName:           "example.com/foo",
									CredentialBundlePath: "/absolute.pem",
								},
							},
						},
					},
				},
			}),
		),
	},
	"PodCertificate projected volume with bad key path": {
		expectedError: "must be a relative path",
		spec: *podtest.MakePod("pod1",
			podtest.SetVolumes(core.Volume{
				Name: "projected-volume",
				VolumeSource: core.VolumeSource{
					Projected: &core.ProjectedVolumeSource{
						Sources: []core.VolumeProjection{
							{
								PodCertificate: &core.PodCertificateProjection{
									SignerName:           "example.com/foo",
									KeyPath:              "/absolute.pem",
									CertificateChainPath: "certificates.pem",
								},
							},
						},
					},
				},
			}),
		),
	},
	"PodCertificate projected volume with bad certificates path": {
		expectedError: "must be a relative path",
		spec: *podtest.MakePod("pod1",
			podtest.SetVolumes(core.Volume{
				Name: "projected-volume",
				VolumeSource: core.VolumeSource{
					Projected: &core.ProjectedVolumeSource{
						Sources: []core.VolumeProjection{
							{
								PodCertificate: &core.PodCertificateProjection{
									SignerName:           "example.com/foo",
									KeyPath:              "key.pem",
									CertificateChainPath: "/certificates.pem",
								},
							},
						},
					},
				},
			}),
		),
	},
	"final PVC name for ephemeral volume must be valid": {
		expectedError: "spec.volumes[1].name: Invalid value: \"" + longVolName + "\": PVC name \"" + longPodName + "-" + longVolName + "\": must be no more than 253 characters",
		spec: *podtest.MakePod(longPodName,
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

-package certificates
+package certauthorization

import (
	"context"

pkg/controller/certificates/cleaner/pcrcleaner.go (new file)
@ -0,0 +1,110 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cleaner

import (
	"context"
	"fmt"
	"time"

	certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	certinformersv1alpha1 "k8s.io/client-go/informers/certificates/v1alpha1"
	"k8s.io/client-go/kubernetes"
	certlistersv1alpha1 "k8s.io/client-go/listers/certificates/v1alpha1"
	"k8s.io/klog/v2"
	"k8s.io/utils/clock"
	"k8s.io/utils/ptr"
)

// PCRCleanerController garbage-collects PodCertificateRequests once they are
// older than a configurable threshold.
type PCRCleanerController struct {
	client          kubernetes.Interface
	pcrLister       certlistersv1alpha1.PodCertificateRequestLister
	clock           clock.PassiveClock
	threshold       time.Duration
	pollingInterval time.Duration
}

// NewPCRCleanerController creates a PCRCleanerController.
func NewPCRCleanerController(
	client kubernetes.Interface,
	pcrInformer certinformersv1alpha1.PodCertificateRequestInformer,
	clock clock.PassiveClock,
	threshold time.Duration,
	pollingInterval time.Duration,
) *PCRCleanerController {
	return &PCRCleanerController{
		client:          client,
		pcrLister:       pcrInformer.Lister(),
		clock:           clock,
		threshold:       threshold,
		pollingInterval: pollingInterval,
	}
}

func (c *PCRCleanerController) Run(ctx context.Context, workers int) {
	defer utilruntime.HandleCrash()

	logger := klog.FromContext(ctx)
	logger.Info("Starting PodCertificateRequest cleaner controller")
	defer logger.Info("Shutting down PodCertificateRequest cleaner controller")

	go wait.UntilWithContext(ctx, c.worker, c.pollingInterval)

	<-ctx.Done()
}

func (c *PCRCleanerController) worker(ctx context.Context) {
	pcrs, err := c.pcrLister.List(labels.Everything())
	if err != nil {
		utilruntime.HandleErrorWithContext(ctx, err, "Unable to list PodCertificateRequests")
		return
	}
	for _, pcr := range pcrs {
		if err := c.handle(ctx, pcr); err != nil {
			utilruntime.HandleErrorWithContext(ctx, err, "Error while attempting to clean PodCertificateRequest", "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
		}
	}
}

func (c *PCRCleanerController) handle(ctx context.Context, pcr *certsv1alpha1.PodCertificateRequest) error {
	if c.clock.Now().Before(pcr.ObjectMeta.CreationTimestamp.Time.Add(c.threshold)) {
		return nil
	}

	opts := metav1.DeleteOptions{
		Preconditions: &metav1.Preconditions{
			UID: ptr.To(pcr.ObjectMeta.UID),
		},
	}

	err := c.client.CertificatesV1alpha1().PodCertificateRequests(pcr.ObjectMeta.Namespace).Delete(ctx, pcr.ObjectMeta.Name, opts)
	if k8serrors.IsNotFound(err) {
		// This is OK; we don't care if someone else already deleted it.
		return nil
	} else if err != nil {
		return fmt.Errorf("unable to delete PodCertificateRequest %q: %w", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name, err)
	}

	return nil
}
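One detail worth calling out: the delete uses a UID precondition, so if a PodCertificateRequest is deleted and recreated under the same name after the cleaner listed it, the delete fails with a conflict rather than removing the newer object.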

pkg/controller/certificates/cleaner/pcrcleaner_test.go (new file)
@ -0,0 +1,220 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cleaner

import (
	"context"
	"crypto/ed25519"
	"crypto/rand"
	"crypto/x509"
	"testing"
	"time"

	certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
	testclock "k8s.io/utils/clock/testing"
	"k8s.io/utils/ptr"
)

func TestPCRCleaner(t *testing.T) {
	now := mustParseRFC3339(t, "2025-01-01T00:30:00Z")
	clock := testclock.NewFakePassiveClock(now)

	podUID1 := "pod-1-uid"
	_, _, pubPKIX1, proof1 := mustMakeEd25519KeyAndProof(t, []byte(podUID1))

	testCases := []struct {
		desc              string
		pcr               *certsv1alpha1.PodCertificateRequest
		wantErrRecognizer func(error) bool
	}{
		{
			desc: "Pending request within the threshold should be left alone",
			pcr: &certsv1alpha1.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace:         "foo",
					Name:              "bar",
					CreationTimestamp: metav1.NewTime(mustParseRFC3339(t, "2025-01-01T00:15:01Z")),
				},
				Spec: certsv1alpha1.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        pubPKIX1,
					ProofOfPossession:    proof1,
				},
			},
			wantErrRecognizer: errorIsNil,
		},
		{
			desc: "Pending request outside the threshold should be deleted",
			pcr: &certsv1alpha1.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace:         "foo",
					Name:              "bar",
					CreationTimestamp: metav1.NewTime(mustParseRFC3339(t, "2025-01-01T00:14:59Z")),
				},
				Spec: certsv1alpha1.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        pubPKIX1,
					ProofOfPossession:    proof1,
				},
			},
			wantErrRecognizer: k8serrors.IsNotFound,
		},
		{
			desc: "Terminal request within the threshold should be left alone",
			pcr: &certsv1alpha1.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace:         "foo",
					Name:              "bar",
					CreationTimestamp: metav1.NewTime(mustParseRFC3339(t, "2025-01-01T00:15:01Z")),
				},
				Spec: certsv1alpha1.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        pubPKIX1,
					ProofOfPossession:    proof1,
				},
				Status: certsv1alpha1.PodCertificateRequestStatus{
					Conditions: []metav1.Condition{
						{
							Type:    certsv1alpha1.PodCertificateRequestConditionTypeDenied,
							Status:  metav1.ConditionTrue,
							Reason:  "Foo",
							Message: "abc",
						},
					},
				},
			},
			wantErrRecognizer: errorIsNil,
		},
		{
			desc: "Terminal request outside the threshold should be deleted",
			pcr: &certsv1alpha1.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace:         "foo",
					Name:              "bar",
					CreationTimestamp: metav1.NewTime(mustParseRFC3339(t, "2025-01-01T00:14:59Z")),
				},
				Spec: certsv1alpha1.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        pubPKIX1,
					ProofOfPossession:    proof1,
				},
				Status: certsv1alpha1.PodCertificateRequestStatus{
					Conditions: []metav1.Condition{
						{
							Type:    certsv1alpha1.PodCertificateRequestConditionTypeDenied,
							Status:  metav1.ConditionTrue,
							Reason:  "Foo",
							Message: "abc",
						},
					},
				},
			},
			wantErrRecognizer: k8serrors.IsNotFound,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			ctx := context.Background()
			kc := fake.NewClientset(tc.pcr)
			cleaner := &PCRCleanerController{
				client:          kc,
				clock:           clock,
				threshold:       15 * time.Minute,
				pollingInterval: 1 * time.Minute,
			}

			// Simulate a pass of the cleaner worker by listing all PCRs and
			// calling handle() on them.
			pcrList, err := kc.CertificatesV1alpha1().PodCertificateRequests(metav1.NamespaceAll).List(ctx, metav1.ListOptions{})
			if err != nil {
				t.Fatalf("Unexpected error listing PCRs: %v", err)
			}
			for _, pcr := range pcrList.Items {
				if err := cleaner.handle(ctx, &pcr); err != nil {
					t.Fatalf("Unexpected error calling handle(): %v", err)
				}
			}

			// Now check on the test case's PCR, to see if it was deleted or not
			// according to our expectation.
			_, err = kc.CertificatesV1alpha1().PodCertificateRequests(tc.pcr.ObjectMeta.Namespace).Get(ctx, tc.pcr.ObjectMeta.Name, metav1.GetOptions{})
			if !tc.wantErrRecognizer(err) {
				t.Errorf("Bad error output: %v", err)
			}
		})
	}
}

func errorIsNil(err error) bool {
	return err == nil
}

func mustParseRFC3339(t *testing.T, stamp string) time.Time {
	parsed, err := time.Parse(time.RFC3339, stamp)
	if err != nil {
		t.Fatalf("Unexpected error parsing time: %v", err)
	}
	return parsed
}

func mustMakeEd25519KeyAndProof(t *testing.T, toBeSigned []byte) (ed25519.PrivateKey, ed25519.PublicKey, []byte, []byte) {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		t.Fatalf("Error while generating ed25519 key: %v", err)
	}
	pubPKIX, err := x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		t.Fatalf("Error while marshaling PKIX public key: %v", err)
	}
	sig := ed25519.Sign(priv, toBeSigned)
	return priv, pub, pubPKIX, sig
}
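The proof of possession built by this helper is an Ed25519 signature over the pod UID bytes. For context, a signer receiving the request could check it with the standard library along these lines; the helper name is hypothetical and this is only a sketch (assumes crypto/ed25519, crypto/x509, and fmt are imported):

// verifyProofOfPossession checks that the requester holds the private key
// matching PKIXPublicKey, by verifying the signature over the pod UID.
func verifyProofOfPossession(pubPKIX, proof []byte, podUID string) error {
	pub, err := x509.ParsePKIXPublicKey(pubPKIX)
	if err != nil {
		return fmt.Errorf("parsing PKIX public key: %w", err)
	}
	edPub, ok := pub.(ed25519.PublicKey)
	if !ok {
		return fmt.Errorf("expected an Ed25519 public key, got %T", pub)
	}
	if !ed25519.Verify(edPub, []byte(podUID), proof) {
		return fmt.Errorf("proof of possession does not verify")
	}
	return nil
}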

@ -74,7 +74,7 @@ func (c *CompletedConfig) GenericStorageProviders(discovery discovery.DiscoveryI
	apiserverinternalrest.StorageProvider{},
	authenticationrest.RESTStorageProvider{Authenticator: c.Generic.Authentication.Authenticator, APIAudiences: c.Generic.Authentication.APIAudiences},
	authorizationrest.RESTStorageProvider{Authorizer: c.Generic.Authorization.Authorizer, RuleResolver: c.Generic.RuleResolver},
-	certificatesrest.RESTStorageProvider{},
+	certificatesrest.RESTStorageProvider{Authorizer: c.Generic.Authorization.Authorizer},
	coordinationrest.RESTStorageProvider{},
	rbacrest.RESTStorageProvider{Authorizer: c.Generic.Authorization.Authorizer},
	svmrest.RESTStorageProvider{},
@ -78,6 +78,13 @@ func validateNodeSelectorAuthorizationFeature() []error {
	return nil
}

func validatePodCertificateRequestFeature() []error {
	if utilfeature.DefaultFeatureGate.Enabled(features.PodCertificateRequest) && !utilfeature.DefaultFeatureGate.Enabled(features.AuthorizeNodeWithSelectors) {
		return []error{fmt.Errorf("PodCertificateRequest feature requires AuthorizeNodeWithSelectors feature to be enabled")}
	}
	return nil
}

func validateUnknownVersionInteroperabilityProxyFlags(options *Options) []error {
	err := []error{}
	if !utilfeature.DefaultFeatureGate.Enabled(features.UnknownVersionInteroperabilityProxy) {

@ -145,6 +152,7 @@ func (s *Options) Validate() []error {
	errs = append(errs, s.Metrics.Validate()...)
	errs = append(errs, validateUnknownVersionInteroperabilityProxyFlags(s)...)
	errs = append(errs, validateNodeSelectorAuthorizationFeature()...)
	errs = append(errs, validatePodCertificateRequestFeature()...)
	errs = append(errs, validateServiceAccountTokenSigningConfig(s)...)
	errs = append(errs, validateCoordinatedLeadershipFlags(s)...)
@ -414,7 +414,7 @@ func (c CompletedConfig) StorageProviders(client *kubernetes.Clientset) ([]contr
	authorizationrest.RESTStorageProvider{Authorizer: c.ControlPlane.Generic.Authorization.Authorizer, RuleResolver: c.ControlPlane.Generic.RuleResolver},
	autoscalingrest.RESTStorageProvider{},
	batchrest.RESTStorageProvider{},
-	certificatesrest.RESTStorageProvider{},
+	certificatesrest.RESTStorageProvider{Authorizer: c.ControlPlane.Generic.Authorization.Authorizer},
	coordinationrest.RESTStorageProvider{},
	discoveryrest.StorageProvider{},
	networkingrest.RESTStorageProvider{},
@ -48,6 +48,7 @@ import (
	utilnet "k8s.io/apimachinery/pkg/util/net"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/version"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	"k8s.io/apiserver/pkg/authorization/authorizerfactory"
	openapinamer "k8s.io/apiserver/pkg/endpoints/openapi"
	genericapiserver "k8s.io/apiserver/pkg/server"

@ -199,7 +200,11 @@ func TestCertificatesRestStorageStrategies(t *testing.T) {
	_, etcdserver, apiserverCfg, _ := newInstance(t)
	defer etcdserver.Terminate(t)

-	certStorageProvider := certificatesrest.RESTStorageProvider{}
+	certStorageProvider := certificatesrest.RESTStorageProvider{
+		Authorizer: &fakeAuthorizer{
+			decision: authorizer.DecisionAllow,
+		},
+	}
	apiGroupInfo, err := certStorageProvider.NewRESTStorage(apiserverCfg.ControlPlane.APIResourceConfigSource, apiserverCfg.ControlPlane.Generic.RESTOptionsGetter)
	if err != nil {
		t.Fatalf("unexpected error from REST storage: %v", err)

@ -212,6 +217,16 @@ func TestCertificatesRestStorageStrategies(t *testing.T) {
	}
}

type fakeAuthorizer struct {
	decision authorizer.Decision
	reason   string
	err      error
}

func (f *fakeAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
	return f.decision, f.reason, f.err
}

func newInstance(t *testing.T) (*Instance, *etcd3testing.EtcdTestServer, CompletedConfig, *assert.Assertions) {
	etcdserver, config, assert := setUp(t)
@ -617,6 +617,12 @@ const (
	// Requires the CRI implementation supports supplying the required stats.
	PodAndContainerStatsFromCRI featuregate.Feature = "PodAndContainerStatsFromCRI"

	// owner: @ahmedtd
	// kep: https://kep.k8s.io/4317
	//
	// Enable PodCertificateRequest objects and podCertificate projected volume sources.
	PodCertificateRequest featuregate.Feature = "PodCertificateRequest"

	// owner: @ahg-g
	//
	// Enables controlling pod ranking on replicaset scale-down.

@ -1447,6 +1453,10 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate
		{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
	},

	PodCertificateRequest: {
		{Version: version.MustParse("1.34"), Default: false, PreRelease: featuregate.Alpha},
	},

	PodDeletionCost: {
		{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
		{Version: version.MustParse("1.22"), Default: true, PreRelease: featuregate.Beta},
@ -34,6 +34,7 @@ import (
	authorizationcel "k8s.io/apiserver/pkg/authorization/cel"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	versionedinformers "k8s.io/client-go/informers"
	certinformersv1alpha1 "k8s.io/client-go/informers/certificates/v1alpha1"
	resourceinformers "k8s.io/client-go/informers/resource/v1beta1"
	"k8s.io/kubernetes/pkg/auth/authorizer/abac"
	"k8s.io/kubernetes/pkg/auth/nodeidentifier"

@ -105,6 +106,10 @@ func (config Config) New(ctx context.Context, serverID string) (authorizer.Autho
	if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
		slices = config.VersionedInformerFactory.Resource().V1beta1().ResourceSlices()
	}
	var podCertificateRequestInformer certinformersv1alpha1.PodCertificateRequestInformer
	if utilfeature.DefaultFeatureGate.Enabled(features.PodCertificateRequest) {
		podCertificateRequestInformer = config.VersionedInformerFactory.Certificates().V1alpha1().PodCertificateRequests()
	}
	node.RegisterMetrics()
	graph := node.NewGraph()
	node.AddGraphEventHandlers(

@ -114,6 +119,7 @@ func (config Config) New(ctx context.Context, serverID string) (authorizer.Autho
		config.VersionedInformerFactory.Core().V1().PersistentVolumes(),
		config.VersionedInformerFactory.Storage().V1().VolumeAttachments(),
		slices, // Nil check in AddGraphEventHandlers can be removed when always creating this.
		podCertificateRequestInformer,
	)
	r.nodeAuthorizer = node.NewAuthorizer(graph, nodeidentifier.NewDefaultNodeIdentifier(), bootstrappolicy.NodeRules())
@ -84,6 +84,7 @@ func NewStorageFactoryConfigEffectiveVersion(effectiveVersion basecompatibility.
	admissionregistration.Resource("mutatingadmissionpolicies").WithVersion("v1alpha1"),
	admissionregistration.Resource("mutatingadmissionpolicybindings").WithVersion("v1alpha1"),
	certificates.Resource("clustertrustbundles").WithVersion("v1beta1"),
	certificates.Resource("podcertificaterequests").WithVersion("v1alpha1"),
	storage.Resource("volumeattributesclasses").WithVersion("v1beta1"),
	storagemigration.Resource("storagemigrations").WithVersion("v1alpha1"),
	resource.Resource("devicetaintrules").WithVersion("v1alpha3"),
@ -60,6 +60,7 @@ import (
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	coreinformersv1 "k8s.io/client-go/informers/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	corelisters "k8s.io/client-go/listers/core/v1"

@ -106,6 +107,7 @@ import (
	"k8s.io/kubernetes/pkg/kubelet/pluginmanager"
	plugincache "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
	"k8s.io/kubernetes/pkg/kubelet/podcertificate"
	"k8s.io/kubernetes/pkg/kubelet/preemption"
	"k8s.io/kubernetes/pkg/kubelet/prober"
	proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"

@ -453,6 +455,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
	}

	var nodeHasSynced cache.InformerSynced
	var nodeInformer coreinformersv1.NodeInformer
	var nodeLister corelisters.NodeLister

	// If kubeClient == nil, we are running in standalone mode (i.e. no API servers)

@ -461,7 +464,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
	kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeDeps.KubeClient, 0, informers.WithTweakListOptions(func(options *metav1.ListOptions) {
		options.FieldSelector = fields.Set{metav1.ObjectNameField: string(nodeName)}.String()
	}))
-	nodeLister = kubeInformers.Core().V1().Nodes().Lister()
+	nodeInformer = kubeInformers.Core().V1().Nodes()
+	nodeLister = nodeInformer.Lister()
	nodeHasSynced = func() bool {
		return kubeInformers.Core().V1().Nodes().Informer().HasSynced()
	}
@ -932,11 +936,34 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
		klog.InfoS("Not starting ClusterTrustBundle informer because we are in static kubelet mode or the ClusterTrustBundleProjection featuregate is disabled")
	}

	if kubeDeps.KubeClient != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodCertificateRequest) {
		kubeInformers := informers.NewSharedInformerFactoryWithOptions(
			kubeDeps.KubeClient,
			0,
			informers.WithTweakListOptions(func(options *metav1.ListOptions) {
				options.FieldSelector = fields.OneTermEqualSelector("spec.nodeName", string(nodeName)).String()
			}),
		)
		podCertificateManager := podcertificate.NewIssuingManager(
			kubeDeps.KubeClient,
			klet.podManager,
			kubeInformers.Certificates().V1alpha1().PodCertificateRequests(),
			nodeInformer,
			nodeName,
			clock.RealClock{},
		)
		klet.podCertificateManager = podCertificateManager
		kubeInformers.Start(ctx.Done())
		go podCertificateManager.Run(ctx)
	} else {
		klet.podCertificateManager = &podcertificate.NoOpManager{}
		klog.InfoS("Not starting PodCertificateRequest manager because we are in static kubelet mode or the PodCertificateRequest feature gate is disabled")
	}

	// NewInitializedVolumePluginMgr initializes some storageErrors on the Kubelet runtimeState (in csi_plugin.go init)
	// which affects node ready status. This function must be called before Kubelet is initialized so that the Node
	// ReadyState is accurate with the storage state.
-	klet.volumePluginMgr, err =
-		NewInitializedVolumePluginMgr(klet, secretManager, configMapManager, tokenManager, clusterTrustBundleManager, kubeDeps.VolumePlugins, kubeDeps.DynamicPluginProber)
+	klet.volumePluginMgr, err = NewInitializedVolumePluginMgr(klet, secretManager, configMapManager, tokenManager, clusterTrustBundleManager, kubeDeps.VolumePlugins, kubeDeps.DynamicPluginProber)
	if err != nil {
		return nil, err
	}
@ -1172,6 +1199,11 @@ type Kubelet struct {
	// allocationManager manages allocated resources for pods.
	allocationManager allocation.Manager

	// podCertificateManager is fed updates as pods are added and removed from
	// the node, and requests certificates for them based on their configured
	// pod certificate volumes.
	podCertificateManager podcertificate.Manager

	// resyncInterval is the interval between periodic full reconciliations of
	// pods on this node.
	resyncInterval time.Duration
@ -2596,6 +2628,8 @@ func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) {
		// the apiserver and no action (other than cleanup) is required.
		kl.podManager.AddPod(pod)

		kl.podCertificateManager.TrackPod(context.TODO(), pod)

		pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod)
		if wasMirror {
			if pod == nil {

@ -2769,6 +2803,7 @@ func resizeOperationForResources(new, old *resource.Quantity) string {
func (kl *Kubelet) HandlePodRemoves(pods []*v1.Pod) {
	start := kl.clock.Now()
	for _, pod := range pods {
		kl.podCertificateManager.ForgetPod(context.TODO(), pod)
		kl.podManager.RemovePod(pod)
		kl.allocationManager.RemovePod(pod.UID)
@ -89,6 +89,7 @@ import (
	"k8s.io/kubernetes/pkg/kubelet/pluginmanager"
	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
	podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
	"k8s.io/kubernetes/pkg/kubelet/podcertificate"
	proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
	probetest "k8s.io/kubernetes/pkg/kubelet/prober/testing"
	"k8s.io/kubernetes/pkg/kubelet/secret"

@ -298,6 +299,7 @@ func newTestKubeletWithImageList(
	podStartupLatencyTracker := kubeletutil.NewPodStartupLatencyTracker()
	kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager, &statustest.FakePodDeletionSafetyProvider{}, podStartupLatencyTracker)
	kubelet.nodeStartupLatencyTracker = kubeletutil.NewNodeStartupLatencyTracker()
	kubelet.podCertificateManager = &podcertificate.NoOpManager{}

	kubelet.containerRuntime = fakeRuntime
	kubelet.runtimeCache = containertest.NewFakeRuntimeCache(kubelet.containerRuntime)

pkg/kubelet/podcertificate/podcertificatemanager.go (new file)
@ -0,0 +1,822 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package podcertificate

import (
	"bytes"
	"context"
	"crypto"
	"crypto/ecdsa"
	"crypto/ed25519"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	mathrand "math/rand/v2"
	"sync"
	"time"

	certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	certinformersv1alpha1 "k8s.io/client-go/informers/certificates/v1alpha1"
	coreinformersv1 "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes"
	certlistersv1alpha1 "k8s.io/client-go/listers/certificates/v1alpha1"
	corelistersv1 "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog/v2"
	"k8s.io/utils/clock"
)

// PodManager is a local wrapper interface for pod.Manager.
type PodManager interface {
	GetPodByUID(uid types.UID) (*corev1.Pod, bool)
	GetPods() []*corev1.Pod
}

// Manager abstracts the functionality needed by Kubelet and the volume host in
// order to provide pod certificate functionality.
type Manager interface {
	// TrackPod is called by Kubelet every time a new pod is assigned to the node.
	TrackPod(ctx context.Context, pod *corev1.Pod)
	// ForgetPod is called by Kubelet every time a pod is dropped from the node.
	ForgetPod(ctx context.Context, pod *corev1.Pod)

	// GetPodCertificateCredentialBundle is called by the volume host to
	// retrieve the credential bundle for a given pod certificate volume.
	GetPodCertificateCredentialBundle(ctx context.Context, namespace, podName, podUID, volumeName string, sourceIndex int) (privKey []byte, certChain []byte, err error)
}
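The NoOpManager referenced from kubelet.go above is defined later in this file, beyond this excerpt. A minimal sketch of a Manager that does nothing would look like the following; the type name and method bodies here are illustrative assumptions, not the actual definition:

// noOpSketch satisfies Manager without doing any work, which is what the
// kubelet needs when the feature gate is off or it runs in standalone mode.
type noOpSketch struct{}

func (noOpSketch) TrackPod(ctx context.Context, pod *corev1.Pod)  {}
func (noOpSketch) ForgetPod(ctx context.Context, pod *corev1.Pod) {}
func (noOpSketch) GetPodCertificateCredentialBundle(ctx context.Context, namespace, podName, podUID, volumeName string, sourceIndex int) ([]byte, []byte, error) {
	return nil, nil, fmt.Errorf("pod certificate support is disabled")
}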

// After this amount of time (plus jitter), we can assume that a PCR that we
// created, but isn't showing up on our informer, must have been deleted.
const assumeDeletedThreshold = 10 * time.Minute

// IssuingManager is the main implementation of Manager.
//
// The core construct is a workqueue that contains one entry for each
// PodCertificateProjection (tracked with a podname/volumename/sourceindex
// tuple) in each non-mirror Pod scheduled to the node. Every time anything
// interesting happens to a PodCertificateRequest or Pod, we redrive all of the
// potentially-affected PodCertificateProjections into the workqueue.
//
// State is not preserved across restarts; if Kubelet or the node restarts,
// then all PodCertificateProjections will be queued for immediate refresh.
//
// Refresh is handled by periodically redriving all PodCertificateProjections
// into the queue.
type IssuingManager struct {
	kc kubernetes.Interface

	podManager PodManager

	projectionQueue workqueue.TypedRateLimitingInterface[projectionKey]

	pcrInformer cache.SharedIndexInformer
	pcrLister   certlistersv1alpha1.PodCertificateRequestLister

	nodeInformer cache.SharedIndexInformer
	nodeLister   corelistersv1.NodeLister
	nodeName     types.NodeName

	clock clock.PassiveClock

	// lock covers credStore
	lock      sync.Mutex
	credStore map[projectionKey]*projectionRecord
}

type projectionKey struct {
	namespace   string
	podName     string
	podUID      string
	volumeName  string
	sourceIndex int
}

type projectionRecord struct {
	// lock covers all fields within projectionRecord.
	lock sync.Mutex

	// The state machine for this projection:
	//
	//                        ┌─────────────────┐
	//                        ▼                 │
	// initial ──► wait ──► fresh ──► waitrefresh
	//               │                     │
	//               ├────► denied ◄───────┤
	//               │                     │
	//               └────► failed ◄───────┘
	curState credState
}
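Each arrow in the diagram corresponds to a transition implemented in handleProjection below: wait and waitrefresh resolve to fresh, denied, or failed based on the PodCertificateRequest's terminal condition; fresh moves to waitrefresh once beginRefreshAt passes; and denied and failed are terminal for the life of the pod.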

// Interface type for all projection record states.
type credState interface {
	getCredBundle() (privKey, certChain []byte, err error)
}

type credStateInitial struct{}

func (c *credStateInitial) getCredBundle() ([]byte, []byte, error) {
	return nil, nil, fmt.Errorf("credential bundle is not issued yet")
}

type credStateWait struct {
	privateKey []byte
	pcrName    string
	// If it has reached this time and the PCR isn't showing up on the informer,
	// assume that it was deleted.
	pcrAbandonAt time.Time
}

func (c *credStateWait) getCredBundle() ([]byte, []byte, error) {
	return nil, nil, fmt.Errorf("credential bundle is not issued yet")
}

type credStateDenied struct {
	Reason  string
	Message string
}

func (c *credStateDenied) getCredBundle() ([]byte, []byte, error) {
	return nil, nil, fmt.Errorf("PodCertificateRequest was permanently denied: reason=%q message=%q", c.Reason, c.Message)
}

type credStateFailed struct {
	Reason  string
	Message string
}

func (c *credStateFailed) getCredBundle() ([]byte, []byte, error) {
	return nil, nil, fmt.Errorf("PodCertificateRequest was permanently failed: reason=%q message=%q", c.Reason, c.Message)
}

type credStateFresh struct {
	privateKey     []byte
	certChain      []byte
	beginRefreshAt time.Time
}

func (c *credStateFresh) getCredBundle() ([]byte, []byte, error) {
	return c.privateKey, c.certChain, nil
}

type credStateWaitRefresh struct {
	privateKey     []byte
	certChain      []byte
	beginRefreshAt time.Time

	refreshPrivateKey []byte
	refreshPCRName    string
	// If it has reached this time and the PCR isn't showing up on the informer,
	// assume that it was deleted.
	refreshPCRAbandonAt time.Time
}

func (c *credStateWaitRefresh) getCredBundle() ([]byte, []byte, error) {
	return c.privateKey, c.certChain, nil
}

var _ Manager = (*IssuingManager)(nil)
func NewIssuingManager(kc kubernetes.Interface, podManager PodManager, pcrInformer certinformersv1alpha1.PodCertificateRequestInformer, nodeInformer coreinformersv1.NodeInformer, nodeName types.NodeName, clock clock.WithTicker) *IssuingManager {
	m := &IssuingManager{
		kc: kc,

		podManager:      podManager,
		projectionQueue: workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[projectionKey]()),

		pcrInformer:  pcrInformer.Informer(),
		pcrLister:    pcrInformer.Lister(),
		nodeInformer: nodeInformer.Informer(),
		nodeLister:   nodeInformer.Lister(),
		nodeName:     nodeName,
		clock:        clock,

		credStore: map[projectionKey]*projectionRecord{},
	}

	// Add informer functions for PodCertificateRequests. In all cases, we just
	// queue the corresponding PodCertificateProjections for re-processing.
	// This is not needed for correctness, since projectionQueue backoffs will
	// eventually trigger the volume to be inspected. However, it's a better UX
	// for us to notice immediately once the certificate is issued.
	m.pcrInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj any) {
			pcr := obj.(*certificatesv1alpha1.PodCertificateRequest)
			m.queueAllProjectionsForPod(pcr.Spec.PodUID)
		},
		UpdateFunc: func(old, new any) {
			pcr := new.(*certificatesv1alpha1.PodCertificateRequest)
			m.queueAllProjectionsForPod(pcr.Spec.PodUID)
		},
		DeleteFunc: func(obj any) {
			pcr := obj.(*certificatesv1alpha1.PodCertificateRequest)
			m.queueAllProjectionsForPod(pcr.Spec.PodUID)
		},
	})

	return m
}
func (m *IssuingManager) queueAllProjectionsForPod(uid types.UID) {
	pod, ok := m.podManager.GetPodByUID(uid)
	if !ok {
		return
	}

	for _, v := range pod.Spec.Volumes {
		if v.Projected == nil {
			continue
		}

		for sourceIndex, source := range v.Projected.Sources {
			if source.PodCertificate == nil {
				continue
			}

			m.projectionQueue.Add(projectionKey{
				namespace:   pod.ObjectMeta.Namespace,
				podName:     pod.ObjectMeta.Name,
				podUID:      string(pod.ObjectMeta.UID),
				volumeName:  v.Name,
				sourceIndex: sourceIndex,
			})
		}
	}
}
func (m *IssuingManager) Run(ctx context.Context) {
	klog.InfoS("podcertificate.IssuingManager starting up")
	if !cache.WaitForCacheSync(ctx.Done(), m.pcrInformer.HasSynced, m.nodeInformer.HasSynced) {
		return
	}
	go wait.JitterUntilWithContext(ctx, m.runRefreshPass, 1*time.Minute, 1.0, false)
	go wait.UntilWithContext(ctx, m.runProjectionProcessor, time.Second)
	<-ctx.Done()

	m.projectionQueue.ShutDown()

	klog.InfoS("podcertificate.IssuingManager shut down")
}

func (m *IssuingManager) runProjectionProcessor(ctx context.Context) {
	for m.processNextProjection(ctx) {
	}
}

func (m *IssuingManager) processNextProjection(ctx context.Context) bool {
	key, quit := m.projectionQueue.Get()
	if quit {
		return false
	}
	defer m.projectionQueue.Done(key)

	err := m.handleProjection(ctx, key)
	if err != nil {
		utilruntime.HandleErrorWithContext(ctx, err, "while handling podCertificate projected volume source", "namespace", key.namespace, "pod", key.podName, "volume", key.volumeName, "sourceIndex", key.sourceIndex)
		m.projectionQueue.AddRateLimited(key)
		return true
	}

	m.projectionQueue.Forget(key)
	return true
}
func (m *IssuingManager) handleProjection(ctx context.Context, key projectionKey) error {
	// Remember, returning nil from this function indicates that the work item
	// was successfully processed, and should be dropped from the queue.

	pod, ok := m.podManager.GetPodByUID(types.UID(key.podUID))
	if !ok {
		// If we can't find the pod anymore, it's been deleted. Clear all our
		// internal state associated with the pod and return a nil error so it
		// is forgotten from the queue.

		m.lock.Lock()
		defer m.lock.Unlock()
		for k := range m.credStore {
			if k.namespace == key.namespace && k.podName == key.podName && k.podUID == key.podUID {
				delete(m.credStore, k)
			}
		}

		return nil
	}

	var source *corev1.PodCertificateProjection
	for _, vol := range pod.Spec.Volumes {
		if vol.Name == key.volumeName && vol.Projected != nil {
			for i, volumeSource := range vol.Projected.Sources {
				if i == key.sourceIndex && volumeSource.PodCertificate != nil {
					source = volumeSource.PodCertificate
				}
			}
		}
	}
	if source == nil {
		// No amount of retrying will fix this problem. Log it and return nil.
		utilruntime.HandleErrorWithContext(ctx, nil, "pod does not contain the named podCertificate projected volume source", "key", key)
		return nil
	}

	var rec *projectionRecord
	func() {
		m.lock.Lock()
		defer m.lock.Unlock()

		rec = m.credStore[key]

		if rec == nil {
			rec = &projectionRecord{
				curState: &credStateInitial{},
			}
			m.credStore[key] = rec
		}
	}()

	// Lock the record for the remainder of the function.
	rec.lock.Lock()
	defer rec.lock.Unlock()
	switch state := rec.curState.(type) {
	case *credStateInitial:
		// We have not started the initial issuance. We need to create a PCR
		// and record it in credStore.

		// We fetch the service account so we can know its UID. Ideally, Kubelet
		// would have a central component that tracks all service accounts related
		// to pods on the node using a single-item watch.
		serviceAccount, err := m.kc.CoreV1().ServiceAccounts(pod.ObjectMeta.Namespace).Get(ctx, pod.Spec.ServiceAccountName, metav1.GetOptions{})
		if err != nil {
			return fmt.Errorf("while fetching service account: %w", err)
		}

		node, err := m.nodeLister.Get(string(m.nodeName))
		if err != nil {
			return fmt.Errorf("while getting node object from local cache: %w", err)
		}

		privKey, pcr, err := m.createPodCertificateRequest(
			ctx,
			pod.ObjectMeta.Namespace,
			pod.ObjectMeta.Name, pod.ObjectMeta.UID,
			pod.Spec.ServiceAccountName, serviceAccount.ObjectMeta.UID,
			m.nodeName, node.ObjectMeta.UID,
			source.SignerName, source.KeyType, source.MaxExpirationSeconds,
		)
		if err != nil {
			return fmt.Errorf("while creating initial PodCertificateRequest: %w", err)
		}

		rec.curState = &credStateWait{
			privateKey:   privKey,
			pcrName:      pcr.ObjectMeta.Name,
			pcrAbandonAt: pcr.ObjectMeta.CreationTimestamp.Time.Add(assumeDeletedThreshold + jitterDuration()),
		}

		// Return nil to remove the projection from the workqueue; it will be
		// re-added once the PodCertificateRequest appears in the informer cache
		// and goes through status updates.
		klog.V(4).InfoS("PodCertificateRequest created, moving to credStateWait", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
		return nil
	case *credStateWait:
		// We are working through the initial issuance. We created a PCR, now
		// we need to wait for it to reach a terminal state.

		pcr, err := m.pcrLister.PodCertificateRequests(key.namespace).Get(state.pcrName)
		if k8serrors.IsNotFound(err) && m.clock.Now().After(state.pcrAbandonAt) {
			// "Not Found" could be due to informer lag, or because someone
			// deleted the PodCertificateRequest. In the first case, the
			// correct action is to continue waiting. In the second case, the
			// correct action is to recreate the PCR. Properly disambiguating
			// the cases will require resourceVersions to be ordered, and for
			// the lister to report the highest resource version it has seen. In
			// the meantime, assume that if it has been 10 minutes since we
			// remember creating the PCR, then we must be in case 2. Return to
			// credStateInitial so we create a new PCR.
			rec.curState = &credStateInitial{}
			return fmt.Errorf("PodCertificateRequest %q appears to have been deleted", key.namespace+"/"+state.pcrName)
		} else if err != nil {
			return fmt.Errorf("while getting PodCertificateRequest %q: %w", key.namespace+"/"+state.pcrName, err)
		}

		// If the PodCertificateRequest has moved to a terminal state, update
		// our state machine accordingly.
		for _, cond := range pcr.Status.Conditions {
			switch cond.Type {
			case certificatesv1alpha1.PodCertificateRequestConditionTypeDenied:
				rec.curState = &credStateDenied{
					Reason:  cond.Reason,
					Message: cond.Message,
				}
				klog.V(4).InfoS("PodCertificateRequest denied, moving to credStateDenied", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
				return nil
			case certificatesv1alpha1.PodCertificateRequestConditionTypeFailed:
				rec.curState = &credStateFailed{
					Reason:  cond.Reason,
					Message: cond.Message,
				}
				klog.V(4).InfoS("PodCertificateRequest failed, moving to credStateFailed", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
				return nil
			case certificatesv1alpha1.PodCertificateRequestConditionTypeIssued:
				rec.curState = &credStateFresh{
					privateKey:     state.privateKey,
					certChain:      cleanCertificateChain([]byte(pcr.Status.CertificateChain)),
					beginRefreshAt: pcr.Status.BeginRefreshAt.Time.Add(jitterDuration()),
				}
				klog.V(4).InfoS("PodCertificateRequest issued, moving to credStateFresh", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
				return nil
			}
		}

		// Nothing -- the request is still pending. Return nil to remove the
		// projection from the workqueue. It will be redriven when the
		// PodCertificateRequest gets an update.
		klog.V(4).InfoS("PodCertificateRequest not in terminal state, remaining in credStateWait", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
		return nil
	case *credStateDenied:
		// Nothing to do; this is a permanent error state for the pod.
		klog.V(4).InfoS("staying in credStateDenied", "key", key)
		return nil

	case *credStateFailed:
		// Nothing to do; this is a permanent error state for the pod.
		klog.V(4).InfoS("staying in credStateFailed", "key", key)
		return nil
|
||||
// Do nothing until it is time to refresh, then create a new PCR and
|
||||
// switch to credStateWaitRefresh.
|
||||
|
||||
if m.clock.Now().Before(state.beginRefreshAt) {
|
||||
// If it's not time to refresh yet, do nothing.
|
||||
return nil
|
||||
}
|
||||
|
||||
klog.V(4).InfoS("Time to refresh", "key", key)
|
||||
|
||||
// We fetch the service account so we can know its UID. Ideally, Kubelet
|
||||
// would have a central component that tracks all service accounts related
|
||||
// to pods on the node using a single-item watch.
|
||||
serviceAccount, err := m.kc.CoreV1().ServiceAccounts(pod.ObjectMeta.Namespace).Get(ctx, pod.Spec.ServiceAccountName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("while fetching service account: %w", err)
|
||||
}
|
||||
|
||||
node, err := m.nodeLister.Get(string(m.nodeName))
|
||||
if err != nil {
|
||||
return fmt.Errorf("while getting node object from local cache: %w", err)
|
||||
}
|
||||
|
||||
privKey, pcr, err := m.createPodCertificateRequest(
|
||||
ctx,
|
||||
pod.ObjectMeta.Namespace,
|
||||
pod.ObjectMeta.Name, pod.ObjectMeta.UID,
|
||||
pod.Spec.ServiceAccountName, serviceAccount.ObjectMeta.UID,
|
||||
m.nodeName, node.ObjectMeta.UID,
|
||||
source.SignerName, source.KeyType, source.MaxExpirationSeconds,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("while creating refresh PodCertificateRequest: %w", err)
|
||||
}
|
||||
|
||||
rec.curState = &credStateWaitRefresh{
|
||||
privateKey: state.privateKey,
|
||||
certChain: state.certChain,
|
||||
beginRefreshAt: state.beginRefreshAt,
|
||||
|
||||
refreshPrivateKey: privKey,
|
||||
refreshPCRName: pcr.ObjectMeta.Name,
|
||||
refreshPCRAbandonAt: pcr.ObjectMeta.CreationTimestamp.Time.Add(assumeDeletedThreshold + jitterDuration()),
|
||||
}
|
||||
|
||||
// Return nil to remove the projection from the workqueue --- it will be
|
||||
// readded once the PodCertificateRequest appears in the informer cache,
|
||||
// and goes through status updates.
|
||||
klog.V(4).InfoS("PodCertificateRequest created, moving to credStateWaitRefresh", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
|
||||
return nil
|
||||
|
||||
case *credStateWaitRefresh:
|
||||
// Check the refresh PodCertificateRequest
|
||||
pcr, err := m.pcrLister.PodCertificateRequests(key.namespace).Get(state.refreshPCRName)
|
||||
if k8serrors.IsNotFound(err) && m.clock.Now().After(state.refreshPCRAbandonAt) {
|
||||
// "Not Found" could be due to informer lag, or because someone
|
||||
// deleted the PodCertificateRequest. In the first case, the
|
||||
// correct action is to continue waiting. In the second case, the
|
||||
// correct action is to recreate the PCR. Properly disambiguating
|
||||
// the cases will require resourceVersions to be ordered, and for
|
||||
// the lister to report the highest resource version it has seen. In
|
||||
// the meantime, assume that if it has been 10 minutes since we
|
||||
// remember creating the PCR, then we must be in case 2. Return to
|
||||
// credStateFresh so we create a new PCR.
|
||||
rec.curState = &credStateFresh{
|
||||
privateKey: state.privateKey,
|
||||
certChain: state.certChain,
|
||||
beginRefreshAt: state.beginRefreshAt,
|
||||
}
|
||||
return fmt.Errorf("PodCertificateRequest appears to have been deleted")
|
||||
		} else if err != nil {
			return fmt.Errorf("while getting PodCertificateRequest %q: %w", key.namespace+"/"+state.refreshPCRName, err)
		}

		// If the PodCertificateRequest has moved to a terminal state, update
		// our state machine accordingly.
		for _, cond := range pcr.Status.Conditions {
			switch cond.Type {
			case certificatesv1alpha1.PodCertificateRequestConditionTypeDenied:
				rec.curState = &credStateDenied{
					Reason:  cond.Reason,
					Message: cond.Message,
				}
				klog.V(4).InfoS("PodCertificateRequest denied, moving to credStateDenied", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
				return nil
			case certificatesv1alpha1.PodCertificateRequestConditionTypeFailed:
				rec.curState = &credStateFailed{
					Reason:  cond.Reason,
					Message: cond.Message,
				}
klog.V(4).InfoS("PodCertificateRequest denied, moving to credStateFailed", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
|
||||
				return nil
			case certificatesv1alpha1.PodCertificateRequestConditionTypeIssued:
				rec.curState = &credStateFresh{
					privateKey:     state.refreshPrivateKey,
					certChain:      cleanCertificateChain([]byte(pcr.Status.CertificateChain)),
					beginRefreshAt: pcr.Status.BeginRefreshAt.Time.Add(jitterDuration()),
				}
				klog.V(4).InfoS("PodCertificateRequest issued, moving to credStateFresh", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
				return nil
			}
		}

		// Nothing -- the request is still pending. Return nil to remove the
		// projection from the workqueue. It will be redriven when the
		// PodCertificateRequest gets an update.
		klog.V(4).InfoS("PodCertificateRequest not in terminal state, remaining in credStateWaitRefresh", "key", key, "pcr", pcr.ObjectMeta.Namespace+"/"+pcr.ObjectMeta.Name)
		return nil
	}

	return nil
}

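For orientation, the state machine that handleProjection implements can be summarized like this (a reading aid distilled from the cases above; Denied and Failed are terminal):

	credStateInitial     --(PCR created)--------------------> credStateWait
	credStateWait        --Issued---------------------------> credStateFresh
	credStateWait        --Denied / Failed------------------> credStateDenied / credStateFailed
	credStateWait        --(PCR gone past pcrAbandonAt)-----> credStateInitial
	credStateFresh       --(beginRefreshAt reached, new PCR)-> credStateWaitRefresh
	credStateWaitRefresh --Issued---------------------------> credStateFresh (rotated key and chain)
	credStateWaitRefresh --(PCR gone past refreshPCRAbandonAt)-> credStateFresh
	credStateWaitRefresh --Denied / Failed------------------> credStateDenied / credStateFailed
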
// jitterDuration returns a random duration between zero and five minutes,
// used to prevent multiple PodCertificateProjections from synchronizing
// their PCR creations.
func jitterDuration() time.Duration {
	return time.Duration(mathrand.Int64N(5 * 60 * 1_000_000_000))
}

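To make the scheduling arithmetic concrete, here is a minimal, self-contained sketch of how the jitter is consumed when picking a refresh time (the base time stands in for pcr.Status.BeginRefreshAt; illustrative only, not part of the change):

package main

import (
	"fmt"
	mathrand "math/rand/v2"
	"time"
)

// Mirrors jitterDuration above: a uniformly random duration in [0, 5m).
func jitterDuration() time.Duration {
	return time.Duration(mathrand.Int64N(5 * 60 * 1_000_000_000))
}

func main() {
	// Stand-in for pcr.Status.BeginRefreshAt.Time.
	beginRefreshAt := time.Now().Add(12 * time.Hour)
	// Each projection lands on a slightly different refresh time, so their
	// PCR creations do not synchronize.
	fmt.Println("refresh at:", beginRefreshAt.Add(jitterDuration()))
}
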
// runRefreshPass adds every non-mirror pod on the node back to the volume
// source processing queue.
func (m *IssuingManager) runRefreshPass(ctx context.Context) {
	allPods := m.podManager.GetPods()
	for _, pod := range allPods {
		m.queueAllProjectionsForPod(pod.ObjectMeta.UID)
	}
}

// TrackPod queues the pod's podCertificate projected volume sources for
// processing.
func (m *IssuingManager) TrackPod(ctx context.Context, pod *corev1.Pod) {
	m.queueAllProjectionsForPod(pod.ObjectMeta.UID)
}

// ForgetPod queues the pod's podCertificate projected volume sources for processing.
//
// The pod worker will notice that the pod no longer exists and clear any
// pending and live credentials associated with it.
func (m *IssuingManager) ForgetPod(ctx context.Context, pod *corev1.Pod) {
	m.queueAllProjectionsForPod(pod.ObjectMeta.UID)
}

// createPodCertificateRequest creates a PodCertificateRequest.
func (m *IssuingManager) createPodCertificateRequest(
	ctx context.Context,
	namespace string,
	podName string, podUID types.UID,
	serviceAccountName string, serviceAccountUID types.UID,
	nodeName types.NodeName, nodeUID types.UID,
	signerName, keyType string, maxExpirationSeconds *int32) ([]byte, *certificatesv1alpha1.PodCertificateRequest, error) {

	privateKey, publicKey, proof, err := generateKeyAndProof(keyType, []byte(podUID))
	if err != nil {
		return nil, nil, fmt.Errorf("while generating keypair: %w", err)
	}

	pkixPublicKey, err := x509.MarshalPKIXPublicKey(publicKey)
	if err != nil {
		return nil, nil, fmt.Errorf("while marshaling public key: %w", err)
	}

	keyPEM, err := pemEncodeKey(privateKey)
	if err != nil {
		return nil, nil, fmt.Errorf("while PEM-encoding private key: %w", err)
	}

	req := &certificatesv1alpha1.PodCertificateRequest{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:    namespace,
			GenerateName: "req-",
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion: "v1", // Pods live in the legacy core group, whose APIVersion string is "v1".
					Kind:       "Pod",
					Name:       podName,
					UID:        podUID,
				},
			},
		},
		Spec: certificatesv1alpha1.PodCertificateRequestSpec{
			SignerName:           signerName,
			PodName:              podName,
			PodUID:               podUID,
			ServiceAccountName:   serviceAccountName,
			ServiceAccountUID:    serviceAccountUID,
			NodeName:             nodeName,
			NodeUID:              nodeUID,
			MaxExpirationSeconds: maxExpirationSeconds,
			PKIXPublicKey:        pkixPublicKey,
			ProofOfPossession:    proof,
		},
	}

	req, err = m.kc.CertificatesV1alpha1().PodCertificateRequests(namespace).Create(ctx, req, metav1.CreateOptions{})
	if err != nil {
		return nil, nil, fmt.Errorf("while creating on API: %w", err)
	}

	return keyPEM, req, nil
}

func (m *IssuingManager) GetPodCertificateCredentialBundle(ctx context.Context, namespace, podName, podUID, volumeName string, sourceIndex int) ([]byte, []byte, error) {
	var rec *projectionRecord
	func() {
		m.lock.Lock()
		defer m.lock.Unlock()

		credKey := projectionKey{
			namespace:   namespace,
			podName:     podName,
			podUID:      podUID,
			volumeName:  volumeName,
			sourceIndex: sourceIndex,
		}
		rec = m.credStore[credKey]
	}()

	if rec == nil {
		return nil, nil, fmt.Errorf("no credentials yet")
	}

	rec.lock.Lock()
	defer rec.lock.Unlock()

	return rec.curState.getCredBundle()
}

func hashBytes(in []byte) []byte {
	out := sha256.Sum256(in)
	return out[:]
}

func generateKeyAndProof(keyType string, toBeSigned []byte) (privKey crypto.PrivateKey, pubKey crypto.PublicKey, sig []byte, err error) {
	switch keyType {
	case "RSA3072":
		key, err := rsa.GenerateKey(rand.Reader, 3072)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("while generating RSA 3072 key: %w", err)
		}
		sig, err := rsa.SignPSS(rand.Reader, key, crypto.SHA256, hashBytes(toBeSigned), nil)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("while signing proof: %w", err)
		}
		return key, &key.PublicKey, sig, nil
	case "RSA4096":
		key, err := rsa.GenerateKey(rand.Reader, 4096)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("while generating RSA 4096 key: %w", err)
		}
		sig, err := rsa.SignPSS(rand.Reader, key, crypto.SHA256, hashBytes(toBeSigned), nil)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("while signing proof: %w", err)
		}
		return key, &key.PublicKey, sig, nil
	case "ECDSAP256":
		key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("while generating ECDSA P256 key: %w", err)
		}
		sig, err := ecdsa.SignASN1(rand.Reader, key, hashBytes(toBeSigned))
		if err != nil {
			return nil, nil, nil, fmt.Errorf("while signing proof: %w", err)
		}
		return key, &key.PublicKey, sig, nil
	case "ECDSAP384":
		key, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("while generating ECDSA P384 key: %w", err)
		}
		sig, err := ecdsa.SignASN1(rand.Reader, key, hashBytes(toBeSigned))
		if err != nil {
			return nil, nil, nil, fmt.Errorf("while signing proof: %w", err)
		}
		return key, &key.PublicKey, sig, nil
	case "ECDSAP521":
		key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("while generating ECDSA P521 key: %w", err)
		}
		sig, err := ecdsa.SignASN1(rand.Reader, key, hashBytes(toBeSigned))
		if err != nil {
			return nil, nil, nil, fmt.Errorf("while signing proof: %w", err)
		}
		return key, &key.PublicKey, sig, nil
	case "ED25519":
		pub, priv, err := ed25519.GenerateKey(rand.Reader)
		if err != nil {
			return nil, nil, nil, fmt.Errorf("while generating Ed25519 key: %w", err)
		}
		sig := ed25519.Sign(priv, toBeSigned)
		return priv, pub, sig, nil
	default:
		return nil, nil, nil, fmt.Errorf("unknown key type %q", keyType)
	}
}

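For context, a signer that receives one of these requests can check the proof-of-possession roughly as follows. This is a minimal sketch for the ED25519 branch only (the RSA and ECDSA branches sign sha256(podUID) rather than the raw bytes); verifyEd25519Proof is a hypothetical helper, not part of this change:

package signerexample

import (
	"crypto/ed25519"
	"crypto/x509"
	"fmt"
)

// verifyEd25519Proof checks that proof is a valid signature over the raw
// pod UID bytes by the key carried in spec.PKIXPublicKey.
func verifyEd25519Proof(pkixPublicKey, proof []byte, podUID string) error {
	pub, err := x509.ParsePKIXPublicKey(pkixPublicKey)
	if err != nil {
		return fmt.Errorf("while parsing PKIX public key: %w", err)
	}
	edPub, ok := pub.(ed25519.PublicKey)
	if !ok {
		return fmt.Errorf("public key is not ed25519")
	}
	if !ed25519.Verify(edPub, []byte(podUID), proof) {
		return fmt.Errorf("proof of possession does not verify")
	}
	return nil
}
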
func pemEncodeKey(key crypto.PrivateKey) ([]byte, error) {
	keyDER, err := x509.MarshalPKCS8PrivateKey(key)
	if err != nil {
		return nil, fmt.Errorf("while marshaling key to PKCS#8: %w", err)
	}

	return pem.EncodeToMemory(&pem.Block{
		Type:  "PRIVATE KEY",
		Bytes: keyDER,
	}), nil
}

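Consumers of the credential bundle can reverse this encoding with the standard library; a minimal sketch (parseKeyPEM is a hypothetical helper, and it assumes a single PRIVATE KEY block, as produced above):

package credentialsexample

import (
	"crypto"
	"crypto/x509"
	"encoding/pem"
	"errors"
)

// parseKeyPEM reverses pemEncodeKey: it decodes the first PEM block and
// parses it as a PKCS#8 private key.
func parseKeyPEM(keyPEM []byte) (crypto.PrivateKey, error) {
	block, _ := pem.Decode(keyPEM)
	if block == nil || block.Type != "PRIVATE KEY" {
		return nil, errors.New("no PRIVATE KEY block found")
	}
	return x509.ParsePKCS8PrivateKey(block.Bytes)
}
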
// cleanCertificateChain ensures that all inter-block data and block headers
// are dropped from the certificate chain.
func cleanCertificateChain(in []byte) []byte {
	outChain := &bytes.Buffer{}

	rest := in
	var b *pem.Block
	for {
		b, rest = pem.Decode(rest)
		if b == nil {
			break
		}

		cleanedBlock := &pem.Block{
			Type:  "CERTIFICATE",
			Bytes: b.Bytes,
		}
		outChain.Write(pem.EncodeToMemory(cleanedBlock))
	}

	return outChain.Bytes()
}

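A small self-contained demonstration of that behavior (the DER payload here is a placeholder, not a real certificate):

package main

import (
	"encoding/pem"
	"fmt"
)

func main() {
	// A chain with leading junk and a block header, as a sloppy signer
	// might emit it.
	dirty := append([]byte("issued by the toy signer\n"), pem.EncodeToMemory(&pem.Block{
		Type:    "CERTIFICATE",
		Headers: map[string]string{"Comment": "this header gets dropped"},
		Bytes:   []byte{0x30, 0x00}, // placeholder DER payload
	})...)

	// Re-encoding only the block bytes drops the junk and the header,
	// which is what cleanCertificateChain does for every block.
	b, _ := pem.Decode(dirty)
	fmt.Printf("%s", pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: b.Bytes}))
}
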
// NoOpManager is an implementation of Manager that just returns errors, meant
// for use in static/detached Kubelet mode.
type NoOpManager struct{}

var _ Manager = (*NoOpManager)(nil)

func (m *NoOpManager) TrackPod(ctx context.Context, pod *corev1.Pod) {
}

func (m *NoOpManager) ForgetPod(ctx context.Context, pod *corev1.Pod) {
}

func (m *NoOpManager) GetPodCertificateCredentialBundle(ctx context.Context, namespace, podName, podUID, volumeName string, sourceIndex int) ([]byte, []byte, error) {
	return nil, nil, fmt.Errorf("unimplemented")
}

pkg/kubelet/podcertificate/podcertificatemanager_test.go (new file, 532 lines)

@@ -0,0 +1,532 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package podcertificate

import (
	"context"
	"fmt"
	"slices"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	certlistersv1alpha1 "k8s.io/client-go/listers/certificates/v1alpha1"
	corelistersv1 "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/kubernetes/test/utils/hermeticpodcertificatesigner"
	"k8s.io/kubernetes/test/utils/ktesting"
	testclock "k8s.io/utils/clock/testing"
	"k8s.io/utils/ptr"
)

func TestTransitionInitialToWait(t *testing.T) {
	ctx, cancel := context.WithCancel(ktesting.Init(t))
	defer cancel()

	kc := fake.NewSimpleClientset()
	clock := testclock.NewFakeClock(mustRFC3339(t, "2010-01-01T00:00:00Z"))

	signerName := "foo.com/signer"

	pcrStore := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	pcrLister := certlistersv1alpha1.NewPodCertificateRequestLister(pcrStore)

	nodeStore := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	nodeLister := corelistersv1.NewNodeLister(nodeStore)
	node1 := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "node1",
			UID:  "node1-uid",
		},
	}
	nodeStore.Add(node1)

	workloadSA, err := kc.CoreV1().ServiceAccounts("ns1").Create(ctx, &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "ns1",
			Name:      "workload",
		},
	}, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating workload serviceaccount: %v", err)
	}

	node1PodManager := &FakeSynchronousPodManager{
		pods: []*corev1.Pod{},
	}

	workloadPod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "ns1",
			Name:      "workload",
		},
		Spec: corev1.PodSpec{
			ServiceAccountName: workloadSA.ObjectMeta.Name,
			NodeName:           "node1",
			Containers: []corev1.Container{
				{
					Name:  "main",
					Image: "notarealimage",
					VolumeMounts: []corev1.VolumeMount{
						{
							Name:      "certificate",
							MountPath: "/run/foo-cert",
						},
					},
				},
			},
			Volumes: []corev1.Volume{
				{
					Name: "certificate",
					VolumeSource: corev1.VolumeSource{
						Projected: &corev1.ProjectedVolumeSource{
							Sources: []corev1.VolumeProjection{
								{
									PodCertificate: &corev1.PodCertificateProjection{
										SignerName:           signerName,
										KeyType:              "ED25519",
										CredentialBundlePath: "creds.pem",
										MaxExpirationSeconds: ptr.To[int32](86400), // Defaulting doesn't work with a fake client.
									},
								},
							},
						},
					},
				},
			},
		},
	}

	node1PodManager.pods = append(node1PodManager.pods, workloadPod)

	node1PodCertificateManager := &IssuingManager{
		kc:         kc,
		podManager: node1PodManager,
		pcrLister:  pcrLister,
		nodeLister: nodeLister,
		nodeName:   types.NodeName("node1"),
		clock:      clock,
		credStore:  map[projectionKey]*projectionRecord{},
	}

	if err := node1PodCertificateManager.handleProjection(ctx, projectionKey{workloadPod.ObjectMeta.Namespace, workloadPod.ObjectMeta.Name, string(workloadPod.ObjectMeta.UID), "certificate", 0}); err != nil {
		t.Fatalf("Unexpected error while running handleProjection: %v", err)
	}

	gotPCRs, err := kc.CertificatesV1alpha1().PodCertificateRequests("ns1").List(ctx, metav1.ListOptions{})
	if err != nil {
		t.Fatalf("Unexpected error listing PodCertificateRequests in fake client: %v", err)
	}

	if len(gotPCRs.Items) != 1 {
		t.Fatalf("Wrong number of PodCertificateRequests after calling handleProjection; got %d, want 1", len(gotPCRs.Items))
	}

	gotPCR := gotPCRs.Items[0]

	// Check that the created PCR spec matches expectations. Blank out fields on
	// gotPCR that we don't care about.
	wantPCR := &certsv1alpha1.PodCertificateRequest{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "ns1",
		},
		Spec: certsv1alpha1.PodCertificateRequestSpec{
			SignerName:           workloadPod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].PodCertificate.SignerName,
			PodName:              workloadPod.ObjectMeta.Name,
			PodUID:               workloadPod.ObjectMeta.UID,
			ServiceAccountName:   workloadSA.ObjectMeta.Name,
			ServiceAccountUID:    workloadSA.ObjectMeta.UID,
			NodeName:             types.NodeName("node1"),
			NodeUID:              node1.ObjectMeta.UID,
			MaxExpirationSeconds: ptr.To[int32](86400),
		},
	}
	gotPCRClone := gotPCR.DeepCopy()
	gotPCRClone.ObjectMeta = metav1.ObjectMeta{}
	gotPCRClone.ObjectMeta.Namespace = gotPCR.ObjectMeta.Namespace
	gotPCRClone.Spec.PKIXPublicKey = nil
	gotPCRClone.Spec.ProofOfPossession = nil
	gotPCRClone.Status = certsv1alpha1.PodCertificateRequestStatus{}
	if diff := cmp.Diff(gotPCRClone, wantPCR); diff != "" {
		t.Fatalf("PodCertificateManager created a bad PCR; diff (-got +want)\n%s", diff)
	}
}

func TestFullFlow(t *testing.T) {
	ctx, cancel := context.WithCancel(ktesting.Init(t))
	defer cancel()

	kc := fake.NewSimpleClientset()
	informerFactory := informers.NewSharedInformerFactoryWithOptions(kc, 0)
	clock := testclock.NewFakeClock(mustRFC3339(t, "2010-01-01T00:00:00Z"))

	//
	// Configure and boot up a fake podcertificaterequest signing controller.
	//

	signerName := "foo.com/signer"

	caKeys, caCerts, err := hermeticpodcertificatesigner.GenerateCAHierarchy(1)
	if err != nil {
		t.Fatalf("Unexpected error generating CA hierarchy: %v", err)
	}
	pcrSigner := hermeticpodcertificatesigner.New(clock, signerName, caKeys, caCerts, kc)
	go pcrSigner.Run(ctx)

	//
	// Configure and boot up enough Kubelet subsystems to run an IssuingManager.
	//

	node1, err := kc.CoreV1().Nodes().Create(ctx, &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "node1",
		},
	}, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating %s: %v", node1.ObjectMeta.Name, err)
	}

	node1PodManager := &FakePodManager{
		podLister: informerFactory.Core().V1().Pods().Lister(),
	}

	node1PodCertificateManager := NewIssuingManager(
		kc,
		node1PodManager,
		informerFactory.Certificates().V1alpha1().PodCertificateRequests(),
		informerFactory.Core().V1().Nodes(),
		types.NodeName(node1.ObjectMeta.Name),
		clock,
	)

	informerFactory.Start(ctx.Done())
	go node1PodCertificateManager.Run(ctx)

	//
	// Make a pod that uses a podcertificate volume.
	//

	workloadNS, err := kc.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: "workload-ns",
		},
	}, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating workload namespace: %v", err)
	}

	workloadSA, err := kc.CoreV1().ServiceAccounts(workloadNS.ObjectMeta.Name).Create(ctx, &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: workloadNS.ObjectMeta.Name,
			Name:      "workload",
		},
	}, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating workload serviceaccount: %v", err)
	}

	workloadPod, err := kc.CoreV1().Pods(workloadNS.ObjectMeta.Name).Create(ctx, &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: workloadNS.ObjectMeta.Name,
			Name:      "workload",
		},
		Spec: corev1.PodSpec{
			ServiceAccountName: workloadSA.ObjectMeta.Name,
			NodeName:           node1.ObjectMeta.Name,
			Containers: []corev1.Container{
				{
					Name:  "main",
					Image: "notarealimage",
					VolumeMounts: []corev1.VolumeMount{
						{
							Name:      "certificate",
							MountPath: "/run/foo-cert",
						},
					},
				},
			},
			Volumes: []corev1.Volume{
				{
					Name: "certificate",
					VolumeSource: corev1.VolumeSource{
						Projected: &corev1.ProjectedVolumeSource{
							Sources: []corev1.VolumeProjection{
								{
									PodCertificate: &corev1.PodCertificateProjection{
										SignerName:           signerName,
										KeyType:              "ED25519",
										CredentialBundlePath: "creds.pem",
										MaxExpirationSeconds: ptr.To[int32](86400), // Defaulting doesn't work with a fake client.
									},
								},
							},
						},
					},
				},
			},
		},
	}, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating workload pod: %v", err)
	}

	// Because our fake podManager is based on an informer, we need to poll
	// until workloadPod is reflected in the informer.
	err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 15*time.Second, true, func(ctx context.Context) (bool, error) {
		_, ok := node1PodManager.GetPodByUID(workloadPod.ObjectMeta.UID)
		return ok, nil
	})
	if err != nil {
t.Fatalf("Error while waiting node1 podManager to know about workloadPod: %v", err)
|
||||
	}

	node1PodCertificateManager.TrackPod(ctx, workloadPod)

	// Within a few seconds, we should see a PodCertificateRequest created for
	// this pod.
	var gotPCR *certsv1alpha1.PodCertificateRequest
	err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 15*time.Second, true, func(ctx context.Context) (bool, error) {
		pcrs, err := kc.CertificatesV1alpha1().PodCertificateRequests(workloadNS.ObjectMeta.Name).List(ctx, metav1.ListOptions{})
		if err != nil {
			return false, fmt.Errorf("while listing PodCertificateRequests: %w", err)
		}

		if len(pcrs.Items) == 0 {
			return false, nil
		}

		gotPCR = &pcrs.Items[0]
		return true, nil
	})
	if err != nil {
		t.Fatalf("Error while waiting for PCR to be created: %v", err)
	}

	// Check that the created PCR spec matches expectations. Blank out fields on
	// gotPCR that we don't care about. Blank out status, because the
	// controller might have already signed it.
	wantPCR := &certsv1alpha1.PodCertificateRequest{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: workloadNS.ObjectMeta.Name,
		},
		Spec: certsv1alpha1.PodCertificateRequestSpec{
			SignerName:           workloadPod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].PodCertificate.SignerName,
			PodName:              workloadPod.ObjectMeta.Name,
			PodUID:               workloadPod.ObjectMeta.UID,
			ServiceAccountName:   workloadSA.ObjectMeta.Name,
			ServiceAccountUID:    workloadSA.ObjectMeta.UID,
			NodeName:             types.NodeName(node1.ObjectMeta.Name),
			NodeUID:              node1.ObjectMeta.UID,
			MaxExpirationSeconds: ptr.To[int32](86400),
		},
	}
	gotPCRClone := gotPCR.DeepCopy()
	gotPCRClone.ObjectMeta = metav1.ObjectMeta{}
	gotPCRClone.ObjectMeta.Namespace = gotPCR.ObjectMeta.Namespace
	gotPCRClone.Spec.PKIXPublicKey = nil
	gotPCRClone.Spec.ProofOfPossession = nil
	gotPCRClone.Status = certsv1alpha1.PodCertificateRequestStatus{}
	if diff := cmp.Diff(gotPCRClone, wantPCR); diff != "" {
		t.Fatalf("PodCertificateManager created a bad PCR; diff (-got +want)\n%s", diff)
	}

	// Wait some more time for the PCR to be issued.
	err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 15*time.Second, true, func(ctx context.Context) (bool, error) {
		pcrs, err := kc.CertificatesV1alpha1().PodCertificateRequests(workloadNS.ObjectMeta.Name).List(ctx, metav1.ListOptions{})
		if err != nil {
			return false, fmt.Errorf("while listing PodCertificateRequests: %w", err)
		}

		if len(pcrs.Items) == 0 {
			return false, nil
		}

		gotPCR = &pcrs.Items[0]

		for _, cond := range gotPCR.Status.Conditions {
			switch cond.Type {
			case certsv1alpha1.PodCertificateRequestConditionTypeDenied,
				certsv1alpha1.PodCertificateRequestConditionTypeFailed,
				certsv1alpha1.PodCertificateRequestConditionTypeIssued:
				return true, nil
			}
		}
		return false, nil
	})
	if err != nil {
		t.Fatalf("Error while waiting for PCR to be issued: %v", err)
	}

	isIssued := slices.ContainsFunc(gotPCR.Status.Conditions, func(cond metav1.Condition) bool {
		return cond.Type == certsv1alpha1.PodCertificateRequestConditionTypeIssued
	})
	if !isIssued {
		t.Fatalf("The test signingController didn't issue the PCR:\n%+v", gotPCR)
	}

	// Now we know that the PCR was issued, so we can wait for the
	// podcertificate manager to return some valid credentials.
	err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 15*time.Second, true, func(ctx context.Context) (bool, error) {
		_, _, err := node1PodCertificateManager.GetPodCertificateCredentialBundle(ctx, workloadPod.ObjectMeta.Namespace, workloadPod.ObjectMeta.Name, string(workloadPod.ObjectMeta.UID), "certificate", 0)
		if err != nil {
			return false, err
		}
		return true, nil
	})
	if err != nil {
		t.Fatalf("Error while waiting for podcertificate manager to return valid credentials: %v", err)
	}

	_, certChain, err := node1PodCertificateManager.GetPodCertificateCredentialBundle(ctx, workloadPod.ObjectMeta.Namespace, workloadPod.ObjectMeta.Name, string(workloadPod.ObjectMeta.UID), "certificate", 0)
	if err != nil {
		t.Fatalf("Unexpected error getting credentials from pod certificate manager: %v", err)
	}

	if diff := cmp.Diff(string(certChain), gotPCR.Status.CertificateChain); diff != "" {
		t.Fatalf("PodCertificate manager returned bad cert chain; diff (-got +want)\n%s", diff)
	}

	// Fast-forward time until it is past beginRefreshAt (including the possible 5-minute jitter).
	clock.Step(23*time.Hour + 37*time.Minute)

	// Within a few seconds, we should see a new PodCertificateRequest created for
	// this pod.
	err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 15*time.Second, true, func(ctx context.Context) (bool, error) {
		pcrs, err := kc.CertificatesV1alpha1().PodCertificateRequests(workloadNS.ObjectMeta.Name).List(ctx, metav1.ListOptions{})
		if err != nil {
			return false, fmt.Errorf("while listing PodCertificateRequests: %w", err)
		}

		if len(pcrs.Items) == 0 {
			return false, nil
		}

		gotPCR = &pcrs.Items[0]
		return true, nil
	})
	if err != nil {
		t.Fatalf("Error while waiting for PCR to be created: %v", err)
	}

	// We will assume that the created PCR matches our expectations.

	// Wait some more time for the new PCR to be issued.
	err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 15*time.Second, true, func(ctx context.Context) (bool, error) {
		pcrs, err := kc.CertificatesV1alpha1().PodCertificateRequests(workloadNS.ObjectMeta.Name).List(ctx, metav1.ListOptions{})
		if err != nil {
			return false, fmt.Errorf("while listing PodCertificateRequests: %w", err)
		}

		if len(pcrs.Items) == 0 {
			return false, nil
		}

		gotPCR = &pcrs.Items[0]

		for _, cond := range gotPCR.Status.Conditions {
			switch cond.Type {
			case certsv1alpha1.PodCertificateRequestConditionTypeDenied,
				certsv1alpha1.PodCertificateRequestConditionTypeFailed,
				certsv1alpha1.PodCertificateRequestConditionTypeIssued:
				return true, nil
			}
		}
		return false, nil
	})
	if err != nil {
		t.Fatalf("Error while waiting for PCR to be issued: %v", err)
	}

	// Now we know that the PCR was issued, so we can wait for the
	// podcertificate manager to start returning the new certificate.
	err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 15*time.Second, true, func(ctx context.Context) (bool, error) {
		_, certChain, err := node1PodCertificateManager.GetPodCertificateCredentialBundle(ctx, workloadPod.ObjectMeta.Namespace, workloadPod.ObjectMeta.Name, string(workloadPod.ObjectMeta.UID), "certificate", 0)
		if err != nil {
			return false, err
		}

		if string(certChain) != gotPCR.Status.CertificateChain {
			return false, nil
		}

		return true, nil
	})
	if err != nil {
		t.Fatalf("Error while waiting for podcertificate manager to return valid credentials: %v", err)
	}

	_, certChain, err = node1PodCertificateManager.GetPodCertificateCredentialBundle(ctx, workloadPod.ObjectMeta.Namespace, workloadPod.ObjectMeta.Name, string(workloadPod.ObjectMeta.UID), "certificate", 0)
	if err != nil {
		t.Fatalf("Unexpected error getting credentials from pod certificate manager: %v", err)
	}

	if diff := cmp.Diff(string(certChain), gotPCR.Status.CertificateChain); diff != "" {
		t.Fatalf("PodCertificate manager returned bad cert chain; diff (-got +want)\n%s", diff)
	}
}

type FakeSynchronousPodManager struct {
	pods []*corev1.Pod
}

func (f *FakeSynchronousPodManager) GetPods() []*corev1.Pod {
	return f.pods
}

func (f *FakeSynchronousPodManager) GetPodByUID(uid types.UID) (*corev1.Pod, bool) {
	for _, pod := range f.pods {
		if pod.ObjectMeta.UID == uid {
			return pod, true
		}
	}
	return nil, false
}

type FakePodManager struct {
	podLister corelistersv1.PodLister
}

func (f *FakePodManager) GetPods() []*corev1.Pod {
	ret, _ := f.podLister.List(labels.Everything())
	return ret
}

func (f *FakePodManager) GetPodByUID(uid types.UID) (*corev1.Pod, bool) {
	list, _ := f.podLister.List(labels.Everything())
	for _, pod := range list {
		if pod.ObjectMeta.UID == uid {
			return pod, true
		}
	}
	return nil, false
}

func mustRFC3339(t *testing.T, stamp string) time.Time {
	got, err := time.Parse(time.RFC3339, stamp)
	if err != nil {
		t.Fatalf("Error while parsing timestamp: %v", err)
	}
	return got
}

@@ -17,6 +17,7 @@ limitations under the License.
package kubelet

import (
	"context"
	"fmt"
	"runtime"

@@ -35,6 +36,7 @@ import (
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/kubelet/clustertrustbundle"
	"k8s.io/kubernetes/pkg/kubelet/configmap"
	"k8s.io/kubernetes/pkg/kubelet/podcertificate"
	"k8s.io/kubernetes/pkg/kubelet/secret"
	"k8s.io/kubernetes/pkg/kubelet/token"
	"k8s.io/kubernetes/pkg/volume"

@@ -81,6 +83,7 @@ func NewInitializedVolumePluginMgr(
		configMapManager:          configMapManager,
		tokenManager:              tokenManager,
		clusterTrustBundleManager: clusterTrustBundleManager,
		podCertificateManager:     kubelet.podCertificateManager,
		informerFactory:           informerFactory,
		csiDriverLister:           csiDriverLister,
		csiDriversSynced:          csiDriversSynced,

@@ -110,6 +113,7 @@ type kubeletVolumeHost struct {
	tokenManager              *token.Manager
	configMapManager          configmap.Manager
	clusterTrustBundleManager clustertrustbundle.Manager
	podCertificateManager     podcertificate.Manager
	informerFactory           informers.SharedInformerFactory
	csiDriverLister           storagelisters.CSIDriverLister
	csiDriversSynced          cache.InformerSynced

@@ -261,6 +265,10 @@ func (kvh *kubeletVolumeHost) GetTrustAnchorsBySigner(signerName string, labelSe
	return kvh.clusterTrustBundleManager.GetTrustAnchorsBySigner(signerName, labelSelector, allowMissing)
}

func (kvh *kubeletVolumeHost) GetPodCertificateCredentialBundle(ctx context.Context, namespace, podName, podUID, volumeName string, sourceIndex int) ([]byte, []byte, error) {
	return kvh.podCertificateManager.GetPodCertificateCredentialBundle(ctx, namespace, podName, podUID, volumeName, sourceIndex)
}

func (kvh *kubeletVolumeHost) GetNodeLabels() (map[string]string, error) {
	node, err := kvh.kubelet.GetNode()
	if err != nil {

@@ -31,6 +31,7 @@ import (
	autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
	batchv1 "k8s.io/api/batch/v1"
	batchv1beta1 "k8s.io/api/batch/v1beta1" // should this change, too? there are still certv1beta1.CSR printers, but not their v1 versions
	certificatesv1alpha1 "k8s.io/api/certificates/v1alpha1"
	certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
	coordinationv1 "k8s.io/api/coordination/v1"
	coordinationv1alpha2 "k8s.io/api/coordination/v1alpha2"

@@ -425,6 +426,17 @@ func AddHandlers(h printers.PrintHandler) {
	h.TableHandler(clusterTrustBundleColumnDefinitions, printClusterTrustBundle)
	h.TableHandler(clusterTrustBundleColumnDefinitions, printClusterTrustBundleList)

	podCertificateRequestColumnDefinitions := []metav1.TableColumnDefinition{
		{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
		{Name: "PodName", Type: "string", Description: certificatesv1alpha1.PodCertificateRequestSpec{}.SwaggerDoc()["podName"]},
		{Name: "ServiceAccountName", Type: "string", Description: certificatesv1alpha1.PodCertificateRequestSpec{}.SwaggerDoc()["serviceAccountName"]},
		{Name: "NodeName", Type: "string", Description: certificatesv1alpha1.PodCertificateRequestSpec{}.SwaggerDoc()["nodeName"]},
		{Name: "SignerName", Type: "string", Description: certificatesv1alpha1.PodCertificateRequestSpec{}.SwaggerDoc()["signerName"]},
		{Name: "State", Type: "string", Description: "Is the request Pending, Issued, Denied, or Failed?"},
	}
	h.TableHandler(podCertificateRequestColumnDefinitions, printPodCertificateRequest)
	h.TableHandler(podCertificateRequestColumnDefinitions, printPodCertificateRequestList)

	leaseColumnDefinitions := []metav1.TableColumnDefinition{
		{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
		{Name: "Holder", Type: "string", Description: coordinationv1.LeaseSpec{}.SwaggerDoc()["holderIdentity"]},

@@ -2328,6 +2340,41 @@ func printClusterTrustBundleList(list *certificates.ClusterTrustBundleList, opti
	return rows, nil
}

func printPodCertificateRequest(obj *certificates.PodCertificateRequest, options printers.GenerateOptions) ([]metav1.TableRow, error) {
	row := metav1.TableRow{
		Object: runtime.RawExtension{Object: obj},
	}

	// "Issued", "Denied", and "Failed" are all mutually-exclusive states, and
	// they must have Status "True".
	state := "Pending"
	for _, cond := range obj.Status.Conditions {
		switch cond.Type {
		case certificates.PodCertificateRequestConditionTypeIssued:
			state = "Issued"
		case certificates.PodCertificateRequestConditionTypeDenied:
			state = "Denied"
		case certificates.PodCertificateRequestConditionTypeFailed:
			state = "Failed"
		}
	}

	row.Cells = append(row.Cells, obj.Name, obj.Spec.PodName, obj.Spec.ServiceAccountName, string(obj.Spec.NodeName), obj.Spec.SignerName, state)
	return []metav1.TableRow{row}, nil
}

func printPodCertificateRequestList(list *certificates.PodCertificateRequestList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
	rows := make([]metav1.TableRow, 0, len(list.Items))
	for i := range list.Items {
		r, err := printPodCertificateRequest(&list.Items[i], options)
		if err != nil {
			return nil, err
		}
		rows = append(rows, r...)
	}
	return rows, nil
}

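With these handlers registered, table output for PodCertificateRequests looks roughly like this (illustrative values, not captured from a real cluster):

	NAME      PODNAME    SERVICEACCOUNTNAME   NODENAME   SIGNERNAME       STATE
	req-abc   workload   workload             node1      foo.com/signer   Issued
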
func printComponentStatus(obj *api.ComponentStatus, options printers.GenerateOptions) ([]metav1.TableRow, error) {
	row := metav1.TableRow{
		Object: runtime.RawExtension{Object: obj},

@@ -0,0 +1,137 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	"k8s.io/apiserver/pkg/registry/generic"
	genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
	"k8s.io/apiserver/pkg/registry/rest"
	api "k8s.io/kubernetes/pkg/apis/certificates"
	"k8s.io/kubernetes/pkg/printers"
	printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
	printerstorage "k8s.io/kubernetes/pkg/printers/storage"
	"k8s.io/kubernetes/pkg/registry/certificates/podcertificaterequest"
	"k8s.io/utils/clock"
	"sigs.k8s.io/structured-merge-diff/v6/fieldpath"
)

// REST is a RESTStorage for PodCertificateRequest.
type REST struct {
	*genericregistry.Store
}

var _ rest.StandardStorage = &REST{}
var _ rest.TableConvertor = &REST{}
var _ genericregistry.GenericStore = &REST{}

// NewREST returns a RESTStorage object for PodCertificateRequest objects.
func NewREST(optsGetter generic.RESTOptionsGetter, authorizer authorizer.Authorizer, clock clock.PassiveClock) (*REST, *StatusREST, error) {
	strategy := podcertificaterequest.NewStrategy()

	store := &genericregistry.Store{
		NewFunc:                   func() runtime.Object { return &api.PodCertificateRequest{} },
		NewListFunc:               func() runtime.Object { return &api.PodCertificateRequestList{} },
		DefaultQualifiedResource:  api.Resource("podcertificaterequests"),
		SingularQualifiedResource: api.Resource("podcertificaterequest"),

		CreateStrategy: strategy,
		UpdateStrategy: strategy,
		DeleteStrategy: strategy,

		TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)},
	}
	options := &generic.StoreOptions{
		RESTOptions: optsGetter,
		AttrFunc:    getAttrs,
	}
	if err := store.CompleteWithOptions(options); err != nil {
		return nil, nil, err
	}

	statusStrategy := podcertificaterequest.NewStatusStrategy(strategy, authorizer, clock)

	// Subresources use the same store and creation strategy, which only
	// allows empty subs. Updates to an existing subresource are handled by
	// dedicated strategies.
	statusStore := *store
	statusStore.UpdateStrategy = statusStrategy
	statusStore.ResetFieldsStrategy = statusStrategy

	return &REST{store}, &StatusREST{store: &statusStore}, nil
}

func getAttrs(obj runtime.Object) (labels.Set, fields.Set, error) {
	req, ok := obj.(*api.PodCertificateRequest)
	if !ok {
		return nil, nil, fmt.Errorf("not a podcertificaterequest")
	}

	selectableFields := generic.MergeFieldsSets(generic.ObjectMetaFieldsSet(&req.ObjectMeta, true), fields.Set{
		"spec.signerName": req.Spec.SignerName,
		"spec.podName":    req.Spec.PodName,
		"spec.nodeName":   string(req.Spec.NodeName),
	})

	return labels.Set(req.Labels), selectableFields, nil
}

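These selectable fields let clients filter PodCertificateRequests server-side. A minimal client-go sketch (listPCRsForNode is a hypothetical helper, not part of this change):

package clientexample

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listPCRsForNode lists the PodCertificateRequests pinned to one node,
// using the spec.nodeName field registered in getAttrs above.
func listPCRsForNode(ctx context.Context, kc kubernetes.Interface, namespace, nodeName string) error {
	pcrs, err := kc.CertificatesV1alpha1().PodCertificateRequests(namespace).List(ctx, metav1.ListOptions{
		FieldSelector: "spec.nodeName=" + nodeName,
	})
	if err != nil {
		return err
	}
	for _, pcr := range pcrs.Items {
		fmt.Println(pcr.ObjectMeta.Name)
	}
	return nil
}
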
// StatusREST implements the REST endpoint for changing the status of a PodCertificateRequest.
type StatusREST struct {
	store *genericregistry.Store
}

// New creates a new PodCertificateRequest object.
func (r *StatusREST) New() runtime.Object {
	return &api.PodCertificateRequest{}
}

// Destroy cleans up resources on shutdown.
func (r *StatusREST) Destroy() {
	// Given that underlying store is shared with REST,
	// we don't destroy it here explicitly.
}

// Get retrieves the object from the storage. It is required to support Patch.
func (r *StatusREST) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
	return r.store.Get(ctx, name, options)
}

// Update alters the status subset of an object.
func (r *StatusREST) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) {
	// We are explicitly setting forceAllowCreate to false in the call to the underlying storage because
	// subresources should never allow create on update.
	return r.store.Update(ctx, name, objInfo, createValidation, updateValidation, false, options)
}

// GetResetFields implements rest.ResetFieldsStrategy
func (r *StatusREST) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
	return r.store.GetResetFields()
}

func (r *StatusREST) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) {
	return r.store.ConvertToTable(ctx, object, tableOptions)
}

var _ = rest.Patcher(&StatusREST{})

@@ -0,0 +1,660 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"context"
	"crypto"
	"crypto/ed25519"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"testing"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	"k8s.io/apiserver/pkg/registry/generic"
	genericregistrytest "k8s.io/apiserver/pkg/registry/generic/testing"
	etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing"
	"k8s.io/kubernetes/pkg/apis/certificates"
	"k8s.io/kubernetes/pkg/registry/registrytest"
	"k8s.io/utils/clock"
	testclock "k8s.io/utils/clock/testing"
	"k8s.io/utils/ptr"
)

type fakeAuthorizer struct {
	decision authorizer.Decision
	reason   string
	err      error
}

func (f *fakeAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (authorizer.Decision, string, error) {
	return f.decision, f.reason, f.err
}

func newStorage(t *testing.T, authz authorizer.Authorizer, clock clock.PassiveClock) (*REST, *StatusREST, *etcd3testing.EtcdTestServer) {
	etcdStorage, server := registrytest.NewEtcdStorageForResource(t, certificates.SchemeGroupVersion.WithResource("podcertificaterequests").GroupResource())
	restOptions := generic.RESTOptions{
		StorageConfig:           etcdStorage,
		Decorator:               generic.UndecoratedStorage,
		DeleteCollectionWorkers: 1,
		ResourcePrefix:          "podcertificaterequests",
	}
	storage, statusStorage, err := NewREST(restOptions, authz, clock)
	if err != nil {
		t.Fatalf("unexpected error from REST storage: %v", err)
	}
	return storage, statusStorage, server
}

func TestCreate(t *testing.T) {
	authz := &fakeAuthorizer{
		decision: authorizer.DecisionAllow,
	}

	storage, _, server := newStorage(t, authz, testclock.NewFakePassiveClock(mustParseTime(t, "1970-01-01T00:00:00Z")))
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()

	test := genericregistrytest.New(t, storage.Store)
	test.SetUserInfo(&user.DefaultInfo{
		Name: "foo",
	})

	podUID1 := types.UID("pod-uid-1")
	_, _, ed25519PubPKIX1, ed25519Proof1 := mustMakeEd25519KeyAndProof(t, []byte(podUID1))
	_, _, ed25519PubPKIX2, ed25519Proof2 := mustMakeEd25519KeyAndProof(t, []byte("other-value"))

	test.TestCreate(
		// Valid PCR
		&certificates.PodCertificateRequest{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: test.TestNamespace(),
				Name:      "foo",
			},
			Spec: certificates.PodCertificateRequestSpec{
				SignerName:           "k8s.io/foo",
				PodName:              "pod-1",
				PodUID:               podUID1,
				ServiceAccountName:   "sa-1",
				ServiceAccountUID:    types.UID("sa-uid-1"),
				NodeName:             "node-1",
				NodeUID:              types.UID("node-uid-1"),
				MaxExpirationSeconds: ptr.To[int32](86400),
				PKIXPublicKey:        ed25519PubPKIX1,
				ProofOfPossession:    ed25519Proof1,
			},
		},
		// Invalid PCR -- proof-of-possession signed wrong value
		&certificates.PodCertificateRequest{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: test.TestNamespace(),
				Name:      "foo",
			},
			Spec: certificates.PodCertificateRequestSpec{
				SignerName:           "k8s.io/foo",
				PodName:              "pod-1",
				PodUID:               podUID1,
				ServiceAccountName:   "sa-1",
				ServiceAccountUID:    types.UID("sa-uid-1"),
				NodeName:             "node-1",
				NodeUID:              types.UID("node-uid-1"),
				MaxExpirationSeconds: ptr.To[int32](86400),
				PKIXPublicKey:        ed25519PubPKIX2,
				ProofOfPossession:    ed25519Proof2,
			},
		},
	)
}

func TestUpdate(t *testing.T) {
	authz := &fakeAuthorizer{
		decision: authorizer.DecisionAllow,
	}
	storage, _, server := newStorage(t, authz, testclock.NewFakePassiveClock(mustParseTime(t, "1970-01-01T00:00:00Z")))
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()

	test := genericregistrytest.New(t, storage.Store)
	test.SetUserInfo(&user.DefaultInfo{
		Name: "foo",
	})

	podUID1 := types.UID("pod-uid-1")
	_, _, ed25519PubPKIX1, ed25519Proof1 := mustMakeEd25519KeyAndProof(t, []byte(podUID1))

	test.TestUpdate(
		// Valid PCR as a base
		&certificates.PodCertificateRequest{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: test.TestNamespace(),
				Name:      "foo",
			},
			Spec: certificates.PodCertificateRequestSpec{
				SignerName:           "k8s.io/foo",
				PodName:              "pod-1",
				PodUID:               podUID1,
				ServiceAccountName:   "sa-1",
				ServiceAccountUID:    types.UID("sa-uid-1"),
				NodeName:             "node-1",
				NodeUID:              types.UID("node-uid-1"),
				MaxExpirationSeconds: ptr.To[int32](86400),
				PKIXPublicKey:        ed25519PubPKIX1,
				ProofOfPossession:    ed25519Proof1,
			},
		},
		// Valid update function
		func(object runtime.Object) runtime.Object {
			pcr := object.(*certificates.PodCertificateRequest)
			if pcr.ObjectMeta.Annotations == nil {
				pcr.ObjectMeta.Annotations = map[string]string{}
			}
			pcr.ObjectMeta.Annotations["k8s.io/cool-annotation"] = "value"
			return pcr
		},
		// Invalid update function
		func(object runtime.Object) runtime.Object {
			pcr := object.(*certificates.PodCertificateRequest)
			pcr.Spec.SignerName = "test.k8s.io/new-signer"
			return pcr
		},
	)
}

func TestUpdateStompsStatus(t *testing.T) {
	authz := &fakeAuthorizer{
		decision: authorizer.DecisionAllow,
	}
	storage, _, server := newStorage(t, authz, testclock.NewFakePassiveClock(mustParseTime(t, "1970-01-01T00:00:00Z")))
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()

	test := genericregistrytest.New(t, storage.Store)
	test.SetUserInfo(&user.DefaultInfo{
		Name: "foo",
	})

	podUID1 := types.UID("pod-uid-1")
	_, _, ed25519PubPKIX1, ed25519Proof1 := mustMakeEd25519KeyAndProof(t, []byte(podUID1))

	test.TestUpdate(
		// Valid PCR as a base
		&certificates.PodCertificateRequest{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: test.TestNamespace(),
				Name:      "foo",
			},
			Spec: certificates.PodCertificateRequestSpec{
				SignerName:           "k8s.io/foo",
				PodName:              "pod-1",
				PodUID:               podUID1,
				ServiceAccountName:   "sa-1",
				ServiceAccountUID:    types.UID("sa-uid-1"),
				NodeName:             "node-1",
				NodeUID:              types.UID("node-uid-1"),
				MaxExpirationSeconds: ptr.To[int32](86400),
				PKIXPublicKey:        ed25519PubPKIX1,
				ProofOfPossession:    ed25519Proof1,
			},
		},
		// Valid update function
		func(object runtime.Object) runtime.Object {
			pcr := object.(*certificates.PodCertificateRequest)
			// The strategy should stomp status.
			pcr.Status.NotAfter = ptr.To(metav1.NewTime(mustParseTime(t, "2025-01-01T00:00:00Z")))
			return pcr
		},
	)

}

func TestUpdateStatus(t *testing.T) {
	authz := &fakeAuthorizer{
		decision: authorizer.DecisionAllow,
	}
	_, statusStorage, server := newStorage(t, authz, testclock.NewFakePassiveClock(mustParseTime(t, "1970-01-01T00:00:00Z")))
	defer server.Terminate(t)
	defer statusStorage.store.DestroyFunc()

	test := genericregistrytest.New(t, statusStorage.store)
	test.SetUserInfo(&user.DefaultInfo{
		Name: "foo",
	})

	caCertDER, caPrivKey := mustMakeCA(t)
	podUID1 := types.UID("pod-uid-1")
	_, ed25519Pub1, ed25519PubPKIX1, ed25519Proof1 := mustMakeEd25519KeyAndProof(t, []byte(podUID1))
	pod1Cert1 := mustSignCertForPublicKey(t, 24*time.Hour, ed25519Pub1, caCertDER, caPrivKey)

	test.TestUpdate(
		// Valid PCR as a base
		&certificates.PodCertificateRequest{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: test.TestNamespace(),
				Name:      "foo",
			},
			Spec: certificates.PodCertificateRequestSpec{
				SignerName:           "k8s.io/foo",
				PodName:              "pod-1",
				PodUID:               podUID1,
				ServiceAccountName:   "sa-1",
				ServiceAccountUID:    types.UID("sa-uid-1"),
				NodeName:             "node-1",
				NodeUID:              types.UID("node-uid-1"),
				MaxExpirationSeconds: ptr.To[int32](86400),
				PKIXPublicKey:        ed25519PubPKIX1,
				ProofOfPossession:    ed25519Proof1,
			},
		},
		// Valid update function
		func(object runtime.Object) runtime.Object {
			pcr := object.(*certificates.PodCertificateRequest)
			pcr.Status = certificates.PodCertificateRequestStatus{
				Conditions: []metav1.Condition{
					{
						Type:               certificates.PodCertificateRequestConditionTypeIssued,
						Status:             metav1.ConditionTrue,
						Reason:             "Whatever",
						Message:            "Foo message",
						LastTransitionTime: metav1.NewTime(time.Now()),
					},
				},
				CertificateChain: pod1Cert1,
				NotBefore:        ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
				BeginRefreshAt:   ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T12:00:00Z"))),
				NotAfter:         ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-02T00:00:00Z"))),
			}
			return pcr
		},
		// Invalid update function
		func(object runtime.Object) runtime.Object {
			pcr := object.(*certificates.PodCertificateRequest)
			pcr.Spec.SignerName = "test.k8s.io/new-signer"
			return pcr
		},
	)
}

func TestUpdateStatusStompsSpec(t *testing.T) {
	authz := &fakeAuthorizer{
		decision: authorizer.DecisionAllow,
	}
	_, statusStorage, server := newStorage(t, authz, testclock.NewFakePassiveClock(mustParseTime(t, "1970-01-01T00:00:00Z")))
	defer server.Terminate(t)
	defer statusStorage.store.DestroyFunc()

	test := genericregistrytest.New(t, statusStorage.store)
	test.SetUserInfo(&user.DefaultInfo{
		Name: "foo",
	})

	podUID1 := types.UID("pod-uid-1")
	_, _, ed25519PubPKIX1, ed25519Proof1 := mustMakeEd25519KeyAndProof(t, []byte(podUID1))

	test.TestUpdate(
		// Valid PCR as a base
		&certificates.PodCertificateRequest{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: test.TestNamespace(),
				Name:      "foo",
			},
			Spec: certificates.PodCertificateRequestSpec{
				SignerName:           "k8s.io/foo",
				PodName:              "pod-1",
				PodUID:               podUID1,
				ServiceAccountName:   "sa-1",
				ServiceAccountUID:    types.UID("sa-uid-1"),
				NodeName:             "node-1",
				NodeUID:              types.UID("node-uid-1"),
				MaxExpirationSeconds: ptr.To[int32](86400),
				PKIXPublicKey:        ed25519PubPKIX1,
				ProofOfPossession:    ed25519Proof1,
			},
		},
		// Valid update function
		func(object runtime.Object) runtime.Object {
			pcr := object.(*certificates.PodCertificateRequest)
// The stategy should stomp spec.
|
||||
pcr.Spec.SignerName = "foo.com/bar"
|
||||
return pcr
|
||||
},
|
||||
)
|
||||
}

func TestUpdateStatusFailsWhenAuthorizerDenies(t *testing.T) {
	authz := &fakeAuthorizer{
		decision: authorizer.DecisionNoOpinion,
	}
	_, statusStorage, server := newStorage(t, authz, testclock.NewFakePassiveClock(mustParseTime(t, "1970-01-01T00:00:00Z")))
	defer server.Terminate(t)
	defer statusStorage.store.DestroyFunc()

	test := genericregistrytest.New(t, statusStorage.store)
	test.SetUserInfo(&user.DefaultInfo{
		Name: "foo",
	})

	caCertDER, caPrivKey := mustMakeCA(t)
	podUID1 := types.UID("pod-uid-1")
	_, ed25519Pub1, ed25519PubPKIX1, ed25519Proof1 := mustMakeEd25519KeyAndProof(t, []byte(podUID1))
	pod1Cert1 := mustSignCertForPublicKey(t, 24*time.Hour, ed25519Pub1, caCertDER, caPrivKey)

	test.TestUpdate(
		// Valid PCR as a base
		&certificates.PodCertificateRequest{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: test.TestNamespace(),
				Name:      "foo",
			},
			Spec: certificates.PodCertificateRequestSpec{
				SignerName:           "k8s.io/foo",
				PodName:              "pod-1",
				PodUID:               podUID1,
				ServiceAccountName:   "sa-1",
				ServiceAccountUID:    types.UID("sa-uid-1"),
				NodeName:             "node-1",
				NodeUID:              types.UID("node-uid-1"),
				MaxExpirationSeconds: ptr.To[int32](86400),
				PKIXPublicKey:        ed25519PubPKIX1,
				ProofOfPossession:    ed25519Proof1,
			},
		},
		// Valid update function
		func(object runtime.Object) runtime.Object {
			pcr := object.(*certificates.PodCertificateRequest)
			return pcr
		},
		// Invalid update function -- normally a valid update, but above we have
		// configured the authorizer to never return DecisionAllow.
		func(object runtime.Object) runtime.Object {
			pcr := object.(*certificates.PodCertificateRequest)
			pcr.Status = certificates.PodCertificateRequestStatus{
				Conditions: []metav1.Condition{
					{
						Type:               certificates.PodCertificateRequestConditionTypeIssued,
						Status:             metav1.ConditionTrue,
						Reason:             "Whatever",
						Message:            "Foo message",
						LastTransitionTime: metav1.NewTime(time.Now()),
					},
				},
				CertificateChain: pod1Cert1,
				NotBefore:        ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
				BeginRefreshAt:   ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T12:00:00Z"))),
				NotAfter:         ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-02T00:00:00Z"))),
			}
			return pcr
		},
	)
}

func TestDelete(t *testing.T) {
	authz := &fakeAuthorizer{
		decision: authorizer.DecisionAllow,
	}
	storage, _, server := newStorage(t, authz, testclock.NewFakePassiveClock(mustParseTime(t, "1970-01-01T00:00:00Z")))
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()

	test := genericregistrytest.New(t, storage.Store)
	test.SetUserInfo(&user.DefaultInfo{
		Name: "foo",
	})

	podUID1 := types.UID("pod-uid-1")
	_, _, ed25519PubPKIX1, ed25519Proof1 := mustMakeEd25519KeyAndProof(t, []byte(podUID1))

	test.TestDelete(
		&certificates.PodCertificateRequest{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: test.TestNamespace(),
				Name:      "foo",
			},
			Spec: certificates.PodCertificateRequestSpec{
				SignerName:           "k8s.io/foo",
				PodName:              "pod-1",
				PodUID:               podUID1,
				ServiceAccountName:   "sa-1",
				ServiceAccountUID:    types.UID("sa-uid-1"),
				NodeName:             "node-1",
				NodeUID:              types.UID("node-uid-1"),
				MaxExpirationSeconds: ptr.To[int32](86400),
				PKIXPublicKey:        ed25519PubPKIX1,
				ProofOfPossession:    ed25519Proof1,
			},
		},
	)
}

func TestGet(t *testing.T) {
	authz := &fakeAuthorizer{
		decision: authorizer.DecisionAllow,
	}
	storage, _, server := newStorage(t, authz, testclock.NewFakePassiveClock(mustParseTime(t, "1970-01-01T00:00:00Z")))
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()

	test := genericregistrytest.New(t, storage.Store)
	test.SetUserInfo(&user.DefaultInfo{
		Name: "foo",
	})

	podUID1 := types.UID("pod-uid-1")
	_, _, ed25519PubPKIX1, ed25519Proof1 := mustMakeEd25519KeyAndProof(t, []byte(podUID1))

	test.TestGet(
		&certificates.PodCertificateRequest{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: test.TestNamespace(),
				Name:      "foo",
			},
			Spec: certificates.PodCertificateRequestSpec{
				SignerName:           "k8s.io/foo",
				PodName:              "pod-1",
				PodUID:               podUID1,
				ServiceAccountName:   "sa-1",
				ServiceAccountUID:    types.UID("sa-uid-1"),
				NodeName:             "node-1",
				NodeUID:              types.UID("node-uid-1"),
				MaxExpirationSeconds: ptr.To[int32](86400),
				PKIXPublicKey:        ed25519PubPKIX1,
				ProofOfPossession:    ed25519Proof1,
			},
		},
	)
}

func TestList(t *testing.T) {
	authz := &fakeAuthorizer{
		decision: authorizer.DecisionAllow,
	}
	storage, _, server := newStorage(t, authz, testclock.NewFakePassiveClock(mustParseTime(t, "1970-01-01T00:00:00Z")))
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()

	test := genericregistrytest.New(t, storage.Store)
	test.SetUserInfo(&user.DefaultInfo{
		Name: "foo",
	})

	podUID1 := types.UID("pod-uid-1")
	_, _, ed25519PubPKIX1, ed25519Proof1 := mustMakeEd25519KeyAndProof(t, []byte(podUID1))

	test.TestList(
		&certificates.PodCertificateRequest{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: test.TestNamespace(),
				Name:      "foo",
			},
			Spec: certificates.PodCertificateRequestSpec{
				SignerName:           "k8s.io/foo",
				PodName:              "pod-1",
				PodUID:               podUID1,
				ServiceAccountName:   "sa-1",
				ServiceAccountUID:    types.UID("sa-uid-1"),
				NodeName:             "node-1",
				NodeUID:              types.UID("node-uid-1"),
				MaxExpirationSeconds: ptr.To[int32](86400),
				PKIXPublicKey:        ed25519PubPKIX1,
				ProofOfPossession:    ed25519Proof1,
			},
		},
	)
}

func TestWatch(t *testing.T) {
	authz := &fakeAuthorizer{
		decision: authorizer.DecisionAllow,
	}
	storage, _, server := newStorage(t, authz, testclock.NewFakePassiveClock(mustParseTime(t, "1970-01-01T00:00:00Z")))
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()

	test := genericregistrytest.New(t, storage.Store)
	test.SetUserInfo(&user.DefaultInfo{
		Name: "foo",
	})

	podUID1 := types.UID("pod-uid-1")
	_, _, ed25519PubPKIX1, ed25519Proof1 := mustMakeEd25519KeyAndProof(t, []byte(podUID1))

	test.TestWatch(
		&certificates.PodCertificateRequest{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: test.TestNamespace(),
				Name:      "foo",
			},
			Spec: certificates.PodCertificateRequestSpec{
				SignerName:           "k8s.io/foo",
				PodName:              "pod-1",
				PodUID:               podUID1,
				ServiceAccountName:   "sa-1",
				ServiceAccountUID:    types.UID("sa-uid-1"),
				NodeName:             "node-1",
				NodeUID:              types.UID("node-uid-1"),
				MaxExpirationSeconds: ptr.To[int32](86400),
				PKIXPublicKey:        ed25519PubPKIX1,
				ProofOfPossession:    ed25519Proof1,
			},
		},
		// matching labels
		[]labels.Set{},
		// not matching labels
		[]labels.Set{
			{"foo": "bar"},
		},
		// matching fields
		[]fields.Set{
			{
				"metadata.namespace": test.TestNamespace(),
				"metadata.name":      "foo",
				"spec.signerName":    "k8s.io/foo",
				"spec.nodeName":      "node-1",
			},
		},
		// not matching fields
		[]fields.Set{
			{
				"metadata.namespace": test.TestNamespace(),
				"metadata.name":      "foo",
				"spec.signerName":    "k8s.io/othersigner",
				"spec.nodeName":      "node-1",
			},
		},
	)
}
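
TestWatch exercises the spec.signerName and spec.nodeName field selectors, which exist so that a per-node or per-signer agent can watch only its own slice of requests. A rough client-side sketch of that pattern follows; the CertificatesV1alpha1().PodCertificateRequests() accessor is an assumption based on standard client-gen naming for a certificates.k8s.io/v1alpha1 resource, and the helper itself is illustrative, not part of this commit:

// watchNodePCRs watches only the PodCertificateRequests bound to one node,
// using the spec.nodeName field selector validated by TestWatch above.
// Assumes the usual imports: context, k8s.io/apimachinery/pkg/fields,
// metav1, k8s.io/apimachinery/pkg/watch, and k8s.io/client-go/kubernetes.
func watchNodePCRs(ctx context.Context, client kubernetes.Interface, nodeName string) (watch.Interface, error) {
	sel := fields.Set{"spec.nodeName": nodeName}.AsSelector().String()
	return client.CertificatesV1alpha1().PodCertificateRequests(metav1.NamespaceAll).Watch(ctx, metav1.ListOptions{
		FieldSelector: sel,
	})
}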

func mustMakeCA(t *testing.T) ([]byte, ed25519.PrivateKey) {
	signPub, signPriv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		t.Fatalf("Error while generating CA signing key: %v", err)
	}

	caCertTemplate := &x509.Certificate{
		IsCA:                  true,
		BasicConstraintsValid: true,
		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		NotBefore:             mustParseTime(t, "1970-01-01T00:00:00Z"),
		NotAfter:              mustParseTime(t, "1971-01-01T00:00:00Z"),
	}

	caCertDER, err := x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, signPub, signPriv)
	if err != nil {
		t.Fatalf("Error while creating CA certificate: %v", err)
	}

	return caCertDER, signPriv
}

func mustParseTime(t *testing.T, stamp string) time.Time {
	got, err := time.Parse(time.RFC3339, stamp)
	if err != nil {
		t.Fatalf("Error while parsing timestamp: %v", err)
	}
	return got
}

func mustMakeEd25519KeyAndProof(t *testing.T, toBeSigned []byte) (ed25519.PrivateKey, ed25519.PublicKey, []byte, []byte) {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		t.Fatalf("Error while generating ed25519 key: %v", err)
	}
	pubPKIX, err := x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		t.Fatalf("Error while marshaling PKIX public key: %v", err)
	}
	sig := ed25519.Sign(priv, toBeSigned)
	return priv, pub, pubPKIX, sig
}

func mustSignCertForPublicKey(t *testing.T, validity time.Duration, subjectPublicKey crypto.PublicKey, caCertDER []byte, caPrivateKey crypto.PrivateKey) string {
	certTemplate := &x509.Certificate{
		Subject: pkix.Name{
			CommonName: "foo",
		},
		KeyUsage:    x509.KeyUsageDigitalSignature,
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
		NotBefore:   mustParseTime(t, "1970-01-01T00:00:00Z"),
		NotAfter:    mustParseTime(t, "1970-01-01T00:00:00Z").Add(validity),
	}

	caCert, err := x509.ParseCertificate(caCertDER)
	if err != nil {
		t.Fatalf("Error while parsing CA certificate: %v", err)
	}

	certDER, err := x509.CreateCertificate(rand.Reader, certTemplate, caCert, subjectPublicKey, caPrivateKey)
	if err != nil {
		t.Fatalf("Error while signing subject certificate: %v", err)
	}

	certPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "CERTIFICATE",
		Bytes: certDER,
	})

	return string(certPEM)
}
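
The proof-of-possession convention encoded by these helpers is that the requester signs the raw pod UID bytes with the private key matching spec.pkixPublicKey. Verifying such a proof on the signer side is a short, self-contained check for ed25519 keys; a minimal sketch using the same standard-library calls as the helpers above (verifyProof is illustrative, not part of this commit; it assumes the same crypto/ed25519, crypto/x509, fmt, and apimachinery types imports):

// verifyProof checks an ed25519 proof of possession of the kind built by
// mustMakeEd25519KeyAndProof: a signature over the raw pod UID bytes by the
// key whose PKIX encoding rides in the request spec.
func verifyProof(pkixPublicKey, proof []byte, podUID types.UID) error {
	pub, err := x509.ParsePKIXPublicKey(pkixPublicKey)
	if err != nil {
		return fmt.Errorf("while parsing public key: %w", err)
	}
	edPub, ok := pub.(ed25519.PublicKey)
	if !ok {
		return fmt.Errorf("expected an ed25519 public key, got %T", pub)
	}
	if !ed25519.Verify(edPub, []byte(podUID), proof) {
		return fmt.Errorf("proof of possession does not verify")
	}
	return nil
}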

pkg/registry/certificates/podcertificaterequest/strategy.go (new file, 175 lines)
@@ -0,0 +1,175 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package podcertificaterequest provides Registry interface and its RESTStorage
// implementation for storing PodCertificateRequest objects.
package podcertificaterequest // import "k8s.io/kubernetes/pkg/registry/certificates/podcertificaterequest"

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/api/equality"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
	"k8s.io/apiserver/pkg/registry/rest"
	"k8s.io/apiserver/pkg/storage/names"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/apis/certificates"
	certvalidation "k8s.io/kubernetes/pkg/apis/certificates/validation"
	"k8s.io/kubernetes/pkg/certauthorization"
	"k8s.io/utils/clock"
	"sigs.k8s.io/structured-merge-diff/v6/fieldpath"
)

// Strategy implements behavior for PodCertificateRequests.
type Strategy struct {
	runtime.ObjectTyper
	names.NameGenerator
}

var _ rest.RESTCreateStrategy = (*Strategy)(nil)
var _ rest.RESTUpdateStrategy = (*Strategy)(nil)
var _ rest.RESTDeleteStrategy = (*Strategy)(nil)

func NewStrategy() *Strategy {
	return &Strategy{
		ObjectTyper:   legacyscheme.Scheme,
		NameGenerator: names.SimpleNameGenerator,
	}
}

func (s *Strategy) NamespaceScoped() bool {
	return true
}

func (s *Strategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
	req := obj.(*certificates.PodCertificateRequest)
	req.Status = certificates.PodCertificateRequestStatus{}
}

func (s *Strategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
	req := obj.(*certificates.PodCertificateRequest)
	return certvalidation.ValidatePodCertificateRequestCreate(req)
}

func (s *Strategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
	return nil
}

func (s *Strategy) Canonicalize(obj runtime.Object) {}

func (s *Strategy) AllowCreateOnUpdate() bool {
	return false
}

func (s *Strategy) PrepareForUpdate(ctx context.Context, new, old runtime.Object) {
	newReq := new.(*certificates.PodCertificateRequest)
	oldReq := old.(*certificates.PodCertificateRequest)
	newReq.Status = oldReq.Status
}

func (s *Strategy) ValidateUpdate(ctx context.Context, new, old runtime.Object) field.ErrorList {
	newReq := new.(*certificates.PodCertificateRequest)
	oldReq := old.(*certificates.PodCertificateRequest)
	return certvalidation.ValidatePodCertificateRequestUpdate(newReq, oldReq)
}

func (s *Strategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
	return nil
}

func (s *Strategy) AllowUnconditionalUpdate() bool {
	return false
}

// StatusStrategy is the strategy for the status subresource.
type StatusStrategy struct {
	*Strategy
	authorizer authorizer.Authorizer
	clock      clock.PassiveClock
}

func NewStatusStrategy(strategy *Strategy, authorizer authorizer.Authorizer, clock clock.PassiveClock) *StatusStrategy {
	return &StatusStrategy{
		Strategy:   strategy,
		authorizer: authorizer,
		clock:      clock,
	}
}

// GetResetFields returns the set of fields that get reset by the strategy
// and should not be modified by the user.
func (s *StatusStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
	fields := map[fieldpath.APIVersion]*fieldpath.Set{
		"certificates.k8s.io/v1alpha1": fieldpath.NewSet(
			fieldpath.MakePathOrDie("spec"),
		),
	}
	return fields
}

func (s *StatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
	newReq := obj.(*certificates.PodCertificateRequest)
	oldReq := old.(*certificates.PodCertificateRequest)

	// Updating /status should not modify spec
	newReq.Spec = oldReq.Spec

	metav1.ResetObjectMetaForStatus(&newReq.ObjectMeta, &oldReq.ObjectMeta)
}

func (s *StatusStrategy) ValidateUpdate(ctx context.Context, new, old runtime.Object) field.ErrorList {
	oldPCR := old.(*certificates.PodCertificateRequest)
	newPCR := new.(*certificates.PodCertificateRequest)

	errs := certvalidation.ValidatePodCertificateRequestStatusUpdate(newPCR, oldPCR, s.clock)
	if len(errs) != 0 {
		return errs
	}

	// If the caller is trying to change any status fields, they must have
	// the appropriate "sign" permission on the requested signername.
	if !equality.Semantic.DeepEqual(oldPCR.Status, newPCR.Status) {
		user, ok := genericapirequest.UserFrom(ctx)
		if !ok {
			return field.ErrorList{
				field.InternalError(field.NewPath("spec", "signerName"), fmt.Errorf("cannot determine calling user to perform \"sign\" check")),
			}
		}

		if !certauthorization.IsAuthorizedForSignerName(ctx, s.authorizer, user, "sign", oldPCR.Spec.SignerName) {
			klog.V(4).Infof("user not permitted to sign PodCertificateRequest %q with signerName %q", oldPCR.Name, oldPCR.Spec.SignerName)
			return field.ErrorList{
				field.Forbidden(field.NewPath("spec", "signerName"), fmt.Sprintf("User %q is not permitted to \"sign\" for signer %q", user.GetName(), oldPCR.Spec.SignerName)),
			}
		}
	}

	return nil
}

// WarningsOnUpdate returns warnings for the given update.
func (s *StatusStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
	return nil
}

// Canonicalize normalizes the object after validation.
func (s *StatusStrategy) Canonicalize(obj runtime.Object) {}
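
For orientation, the two strategies are meant to be layered: the status strategy wraps the base strategy and adds the per-signer authorization and clock checks. A minimal wiring sketch, grounded in the constructors above (the real wiring lives in the storage package and passes the apiserver's authorizer and a real clock):

// newStrategies shows how the create/update strategy and the status
// subresource strategy defined above fit together. Any authorizer
// implementation works here.
func newStrategies(authz authorizer.Authorizer) (*Strategy, *StatusStrategy) {
	base := NewStrategy()
	return base, NewStatusStrategy(base, authz, clock.RealClock{})
}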

pkg/registry/certificates/podcertificaterequest/strategy_test.go (new file, 505 lines)
@@ -0,0 +1,505 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package podcertificaterequest

import (
	"context"
	"crypto/ed25519"
	"crypto/rand"
	"crypto/x509"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	"k8s.io/apiserver/pkg/endpoints/request"
	"k8s.io/kubernetes/pkg/apis/certificates"
	"k8s.io/utils/clock"
	"k8s.io/utils/ptr"
)

func TestWarningsOnCreate(t *testing.T) {
	strategy := NewStrategy()

	var wantWarnings []string
	gotWarnings := strategy.WarningsOnCreate(context.Background(), &certificates.PodCertificateRequest{})
	if diff := cmp.Diff(gotWarnings, wantWarnings); diff != "" {
		t.Errorf("Got wrong warnings; diff (-got +want):\n%s", diff)
	}
}

func TestAllowCreateOnUpdate(t *testing.T) {
	strategy := NewStrategy()
	if strategy.AllowCreateOnUpdate() != false {
		t.Errorf("Got true, want false")
	}
}

func TestWarningsOnUpdate(t *testing.T) {
	strategy := NewStrategy()
	var wantWarnings []string
	gotWarnings := strategy.WarningsOnUpdate(context.Background(), &certificates.PodCertificateRequest{}, &certificates.PodCertificateRequest{})
	if diff := cmp.Diff(gotWarnings, wantWarnings); diff != "" {
		t.Errorf("Got wrong warnings; diff (-got +want):\n%s", diff)
	}
}

func TestAllowUnconditionalUpdate(t *testing.T) {
	strategy := NewStrategy()
	if strategy.AllowUnconditionalUpdate() != false {
		t.Errorf("Got true, want false")
	}
}

func TestPrepareForCreate(t *testing.T) {
	// PrepareForCreate should stomp any existing status fields.

	strategy := NewStrategy()

	_, _, pubPKIX1, proof1 := mustMakeEd25519KeyAndProof(t, []byte("pod-1-uid"))

	processedPCR := &certificates.PodCertificateRequest{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "foo",
			Name:      "bar",
		},
		Spec: certificates.PodCertificateRequestSpec{
			SignerName:           "foo.com/abc",
			PodName:              "pod-1",
			PodUID:               types.UID("pod-1-uid"),
			ServiceAccountName:   "sa-1",
			ServiceAccountUID:    "sa-uid-1",
			NodeName:             "node-1",
			NodeUID:              "node-uid-1",
			MaxExpirationSeconds: ptr.To[int32](86400),
			PKIXPublicKey:        pubPKIX1,
			ProofOfPossession:    proof1,
		},
		Status: certificates.PodCertificateRequestStatus{
			Conditions: []metav1.Condition{
				{
					Type:    "Denied",
					Status:  metav1.ConditionFalse,
					Reason:  "Foo",
					Message: "Foo message",
				},
			},
		},
	}

	wantPCR := &certificates.PodCertificateRequest{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "foo",
			Name:      "bar",
		},
		Spec: certificates.PodCertificateRequestSpec{
			SignerName:           "foo.com/abc",
			PodName:              "pod-1",
			PodUID:               types.UID("pod-1-uid"),
			ServiceAccountName:   "sa-1",
			ServiceAccountUID:    "sa-uid-1",
			NodeName:             "node-1",
			NodeUID:              "node-uid-1",
			MaxExpirationSeconds: ptr.To[int32](86400),
			PKIXPublicKey:        pubPKIX1,
			ProofOfPossession:    proof1,
		},
	}

	strategy.PrepareForCreate(context.Background(), processedPCR)

	if diff := cmp.Diff(processedPCR, wantPCR); diff != "" {
		t.Errorf("Bad processed PCR; diff (-got +want)\n%s", diff)
	}
}

func TestPrepareForUpdate(t *testing.T) {
	// PrepareForUpdate should stomp any existing status fields.

	strategy := NewStrategy()

	_, _, pubPKIX1, proof1 := mustMakeEd25519KeyAndProof(t, []byte("pod-1-uid"))

	oldPCR := &certificates.PodCertificateRequest{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "foo",
			Name:      "bar",
		},
		Spec: certificates.PodCertificateRequestSpec{
			SignerName:           "foo.com/abc",
			PodName:              "pod-1",
			PodUID:               types.UID("pod-1-uid"),
			ServiceAccountName:   "sa-1",
			ServiceAccountUID:    "sa-uid-1",
			NodeName:             "node-1",
			NodeUID:              "node-uid-1",
			MaxExpirationSeconds: ptr.To[int32](86400),
			PKIXPublicKey:        pubPKIX1,
			ProofOfPossession:    proof1,
		},
		Status: certificates.PodCertificateRequestStatus{
			Conditions: []metav1.Condition{
				{
					Type:    "Denied",
					Status:  metav1.ConditionTrue,
					Reason:  "Foo",
					Message: "Foo message",
				},
			},
		},
	}

	processedPCR := &certificates.PodCertificateRequest{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "foo",
			Name:      "bar",
		},
		Spec: certificates.PodCertificateRequestSpec{
			SignerName:           "foo.com/abc",
			PodName:              "pod-1",
			PodUID:               types.UID("pod-1-uid"),
			ServiceAccountName:   "sa-1",
			ServiceAccountUID:    "sa-uid-1",
			NodeName:             "node-1",
			NodeUID:              "node-uid-1",
			MaxExpirationSeconds: ptr.To[int32](86400),
			PKIXPublicKey:        pubPKIX1,
			ProofOfPossession:    proof1,
		},
		Status: certificates.PodCertificateRequestStatus{
			Conditions: []metav1.Condition{
				{
					Type:    "Failed",
					Status:  metav1.ConditionTrue,
					Reason:  "Foo",
					Message: "Foo message",
				},
			},
		},
	}

	wantPCR := &certificates.PodCertificateRequest{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "foo",
			Name:      "bar",
		},
		Spec: certificates.PodCertificateRequestSpec{
			SignerName:           "foo.com/abc",
			PodName:              "pod-1",
			PodUID:               types.UID("pod-1-uid"),
			ServiceAccountName:   "sa-1",
			ServiceAccountUID:    "sa-uid-1",
			NodeName:             "node-1",
			NodeUID:              "node-uid-1",
			MaxExpirationSeconds: ptr.To[int32](86400),
			PKIXPublicKey:        pubPKIX1,
			ProofOfPossession:    proof1,
		},
		Status: certificates.PodCertificateRequestStatus{
			Conditions: []metav1.Condition{
				{
					Type:    "Denied",
					Status:  metav1.ConditionTrue,
					Reason:  "Foo",
					Message: "Foo message",
				},
			},
		},
	}

	strategy.PrepareForUpdate(context.Background(), processedPCR, oldPCR)

	if diff := cmp.Diff(processedPCR, wantPCR); diff != "" {
		t.Errorf("Bad processed PCR; diff (-got +want)\n%s", diff)
	}
}

func TestStatusPrepareForUpdate(t *testing.T) {
	// StatusStrategy.PrepareForUpdate should reset all spec fields and most
	// metadata fields in the new object.

	strategy := NewStrategy()
	authz := &FakeAuthorizer{
		authorized: authorizer.DecisionAllow,
	}
	statusStrategy := NewStatusStrategy(strategy, authz, clock.RealClock{})

	_, _, pubPKIX1, proof1 := mustMakeEd25519KeyAndProof(t, []byte("pod-1-uid"))

	oldPCR := &certificates.PodCertificateRequest{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "foo",
			Name:      "bar",
		},
		Spec: certificates.PodCertificateRequestSpec{
			SignerName:           "foo.com/abc",
			PodName:              "pod-1",
			PodUID:               types.UID("pod-1-uid"),
			ServiceAccountName:   "sa-1",
			ServiceAccountUID:    "sa-uid-1",
			NodeName:             "node-1",
			NodeUID:              "node-uid-1",
			MaxExpirationSeconds: ptr.To[int32](86400),
			PKIXPublicKey:        pubPKIX1,
			ProofOfPossession:    proof1,
		},
		Status: certificates.PodCertificateRequestStatus{
			Conditions: []metav1.Condition{
				{
					Type:    "Denied",
					Status:  metav1.ConditionTrue,
					Reason:  "Foo",
					Message: "Foo message",
				},
			},
		},
	}

	processedPCR := &certificates.PodCertificateRequest{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "foo",
			Name:      "bar",
		},
		Spec: certificates.PodCertificateRequestSpec{
			SignerName:           "foo.com/different-value",
			PodName:              "pod-1",
			PodUID:               types.UID("pod-1-uid"),
			ServiceAccountName:   "sa-1",
			ServiceAccountUID:    "sa-uid-1",
			NodeName:             "node-1",
			NodeUID:              "node-uid-1",
			MaxExpirationSeconds: ptr.To[int32](86400),
			PKIXPublicKey:        pubPKIX1,
			ProofOfPossession:    proof1,
		},
		Status: certificates.PodCertificateRequestStatus{
			Conditions: []metav1.Condition{
				{
					Type:    "Failed",
					Status:  metav1.ConditionTrue,
					Reason:  "Foo",
					Message: "Foo message",
				},
			},
		},
	}

	wantPCR := &certificates.PodCertificateRequest{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "foo",
			Name:      "bar",
		},
		Spec: certificates.PodCertificateRequestSpec{
			SignerName:           "foo.com/abc",
			PodName:              "pod-1",
			PodUID:               types.UID("pod-1-uid"),
			ServiceAccountName:   "sa-1",
			ServiceAccountUID:    "sa-uid-1",
			NodeName:             "node-1",
			NodeUID:              "node-uid-1",
			MaxExpirationSeconds: ptr.To[int32](86400),
			PKIXPublicKey:        pubPKIX1,
			ProofOfPossession:    proof1,
		},
		Status: certificates.PodCertificateRequestStatus{
			Conditions: []metav1.Condition{
				{
					Type:    "Failed",
					Status:  metav1.ConditionTrue,
					Reason:  "Foo",
					Message: "Foo message",
				},
			},
		},
	}

	statusStrategy.PrepareForUpdate(context.Background(), processedPCR, oldPCR)

	if diff := cmp.Diff(processedPCR, wantPCR); diff != "" {
		t.Errorf("Bad processed PCR; diff (-got +want)\n%s", diff)
	}
}

func TestStatusValidateUpdate(t *testing.T) {
	_, _, pubPKIX1, proof1 := mustMakeEd25519KeyAndProof(t, []byte("pod-1-uid"))

	testCases := []struct {
		desc                 string
		oldPCR, newPCR       *certificates.PodCertificateRequest
		authz                authorizer.Authorizer
		wantValidationErrors field.ErrorList
	}{
		{
			desc: "No errors when the caller is authorized",
			oldPCR: &certificates.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace:       "foo",
					Name:            "bar",
					ResourceVersion: "1",
				},
				Spec: certificates.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID("pod-1-uid"),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        pubPKIX1,
					ProofOfPossession:    proof1,
				},
			},
			newPCR: &certificates.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace:       "foo",
					Name:            "bar",
					ResourceVersion: "1",
				},
				Spec: certificates.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID("pod-1-uid"),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        pubPKIX1,
					ProofOfPossession:    proof1,
				},
				Status: certificates.PodCertificateRequestStatus{
					Conditions: []metav1.Condition{
						{
							Type:               "Failed",
							Status:             metav1.ConditionTrue,
							Reason:             "Foo",
							Message:            "Foo message",
							LastTransitionTime: metav1.NewTime(time.Now()),
						},
					},
				},
			},
			authz: &FakeAuthorizer{
				authorized: authorizer.DecisionAllow,
			},
		},
		{
			desc: "Error when the caller is not authorized",
			oldPCR: &certificates.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace:       "foo",
					Name:            "bar",
					ResourceVersion: "1",
				},
				Spec: certificates.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID("pod-1-uid"),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        pubPKIX1,
					ProofOfPossession:    proof1,
				},
			},
			newPCR: &certificates.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace:       "foo",
					Name:            "bar",
					ResourceVersion: "1",
				},
				Spec: certificates.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID("pod-1-uid"),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        pubPKIX1,
					ProofOfPossession:    proof1,
				},
				Status: certificates.PodCertificateRequestStatus{
					Conditions: []metav1.Condition{
						{
							Type:               "Failed",
							Status:             metav1.ConditionTrue,
							Reason:             "Foo",
							Message:            "Foo message",
							LastTransitionTime: metav1.NewTime(time.Now()),
						},
					},
				},
			},
			authz: &FakeAuthorizer{
				authorized: authorizer.DecisionNoOpinion,
				reason:     "not authorized",
			},
			wantValidationErrors: field.ErrorList{
				field.Forbidden(field.NewPath("spec", "signerName"), "User \"bob\" is not permitted to \"sign\" for signer \"foo.com/abc\""),
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			ctx := context.Background()
			ctx = request.WithUser(ctx, &user.DefaultInfo{Name: "bob"})

			strategy := NewStrategy()
			statusStrategy := NewStatusStrategy(strategy, tc.authz, clock.RealClock{})

			gotValidationErrors := statusStrategy.ValidateUpdate(ctx, tc.newPCR, tc.oldPCR)
			if diff := cmp.Diff(gotValidationErrors, tc.wantValidationErrors); diff != "" {
				t.Errorf("Wrong validation errors; diff (-got +want)\n%s", diff)
			}
		})
	}
}

func mustMakeEd25519KeyAndProof(t *testing.T, toBeSigned []byte) (ed25519.PrivateKey, ed25519.PublicKey, []byte, []byte) {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		t.Fatalf("Error while generating ed25519 key: %v", err)
	}
	pubPKIX, err := x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		t.Fatalf("Error while marshaling PKIX public key: %v", err)
	}
	sig := ed25519.Sign(priv, toBeSigned)
	return priv, pub, pubPKIX, sig
}

type FakeAuthorizer struct {
	authorized authorizer.Decision
	reason     string
	err        error
}

func (f *FakeAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) {
	return f.authorized, f.reason, f.err
}

@@ -17,9 +17,12 @@ limitations under the License.
 package rest

 import (
+	"fmt"
+
 	certificatesapiv1 "k8s.io/api/certificates/v1"
 	certificatesapiv1alpha1 "k8s.io/api/certificates/v1alpha1"
 	certificatesapiv1beta1 "k8s.io/api/certificates/v1beta1"
+	"k8s.io/apiserver/pkg/authorization/authorizer"
 	"k8s.io/apiserver/pkg/registry/generic"
 	"k8s.io/apiserver/pkg/registry/rest"
 	genericapiserver "k8s.io/apiserver/pkg/server"

@@ -31,11 +34,19 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 	certificatestore "k8s.io/kubernetes/pkg/registry/certificates/certificates/storage"
 	clustertrustbundlestore "k8s.io/kubernetes/pkg/registry/certificates/clustertrustbundle/storage"
+	podcertificaterequeststore "k8s.io/kubernetes/pkg/registry/certificates/podcertificaterequest/storage"
+	"k8s.io/utils/clock"
 )

-type RESTStorageProvider struct{}
+type RESTStorageProvider struct {
+	Authorizer authorizer.Authorizer
+}

 func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, error) {
+	if p.Authorizer == nil {
+		return genericapiserver.APIGroupInfo{}, fmt.Errorf("certificates REST storage requires an authorizer")
+	}
+
 	apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(certificates.GroupName, legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs)
 	// If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities.
 	// TODO refactor the plumbing to provide the information in the APIGroupInfo

@@ -110,6 +121,19 @@ func (p RESTStorageProvider) v1alpha1Storage(apiResourceConfigSource serverstora
 		}
 	}

+	if resource := "podcertificaterequests"; apiResourceConfigSource.ResourceEnabled(certificatesapiv1alpha1.SchemeGroupVersion.WithResource(resource)) {
+		if utilfeature.DefaultFeatureGate.Enabled(features.PodCertificateRequest) {
+			pcrStorage, pcrStatusStorage, err := podcertificaterequeststore.NewREST(restOptionsGetter, p.Authorizer, clock.RealClock{})
+			if err != nil {
+				return nil, err
+			}
+			storage[resource] = pcrStorage
+			storage[resource+"/status"] = pcrStatusStorage
+		} else {
+			klog.Warning("PodCertificateRequest storage is disabled because the PodCertificateRequest feature gate is disabled")
+		}
+	}
+
 	return storage, nil
 }

@@ -17,6 +17,7 @@ limitations under the License.
 package volume

 import (
+	"context"
 	"errors"
 	"fmt"
 	"strings"

@@ -312,6 +313,9 @@ type KubeletVolumeHost interface {
 	// Returns trust anchors from the ClusterTrustBundles selected by signer
 	// name and label selector.
 	GetTrustAnchorsBySigner(signerName string, labelSelector *metav1.LabelSelector, allowMissing bool) ([]byte, error)
+
+	// Returns the credential bundle for the specified podCertificate projected volume source.
+	GetPodCertificateCredentialBundle(ctx context.Context, namespace, podName, podUID, volumeName string, sourceIndex int) ([]byte, []byte, error)
 }

 // CSIDriverVolumeHost is a volume host that has access to CSIDriverLister

@@ -17,6 +17,8 @@ limitations under the License.
 package projected

 import (
+	"bytes"
+	"context"
 	"fmt"

 	authenticationv1 "k8s.io/api/authentication/v1"

@@ -254,7 +256,7 @@ func (s *projectedVolumeMounter) collectData(mounterArgs volume.MounterArgs) (ma

 	errlist := []error{}
 	payload := make(map[string]volumeutil.FileProjection)
-	for _, source := range s.source.Sources {
+	for sourceIndex, source := range s.source.Sources {
 		switch {
 		case source.Secret != nil:
 			optional := source.Secret.Optional != nil && *source.Secret.Optional

@@ -386,6 +388,43 @@ func (s *projectedVolumeMounter) collectData(mounterArgs volume.MounterArgs) (ma
 				Mode:   mode,
 				FsUser: mounterArgs.FsUser,
 			}
+		case source.PodCertificate != nil:
+			key, certificates, err := s.plugin.kvHost.GetPodCertificateCredentialBundle(context.TODO(), s.pod.ObjectMeta.Namespace, s.pod.ObjectMeta.Name, string(s.pod.ObjectMeta.UID), s.volName, sourceIndex)
+			if err != nil {
+				errlist = append(errlist, err)
+				continue
+			}
+
+			mode := *s.source.DefaultMode
+			if mounterArgs.FsUser != nil || mounterArgs.FsGroup != nil {
+				mode = 0600
+			}
+
+			if source.PodCertificate.CredentialBundlePath != "" {
+				credentialBundle := bytes.Buffer{}
+				credentialBundle.Write(key)
+				credentialBundle.Write(certificates)
+				payload[source.PodCertificate.CredentialBundlePath] = volumeutil.FileProjection{
+					Data:   credentialBundle.Bytes(),
+					Mode:   mode,
+					FsUser: mounterArgs.FsUser,
+				}
+			}
+			if source.PodCertificate.KeyPath != "" {
+				payload[source.PodCertificate.KeyPath] = volumeutil.FileProjection{
+					Data:   key,
+					Mode:   mode,
+					FsUser: mounterArgs.FsUser,
+				}
+			}
+			if source.PodCertificate.CertificateChainPath != "" {
+				payload[source.PodCertificate.CertificateChainPath] = volumeutil.FileProjection{
+					Data:   certificates,
+					Mode:   mode,
+					FsUser: mounterArgs.FsUser,
+				}
+			}
 		}
 	}
 	return payload, utilerrors.NewAggregate(errlist)
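
When CredentialBundlePath is set, the private key PEM and the certificate chain PEM are concatenated into a single file, key first. A consumer can split that bundle back apart with nothing but encoding/pem; a minimal sketch, assuming only the key-first layout produced above (splitCredentialBundle is illustrative, not part of this commit):

// splitCredentialBundle separates a credential bundle written by collectData
// back into the private key block and the certificate chain blocks.
func splitCredentialBundle(bundle []byte) (keyPEM, chainPEM []byte, err error) {
	var keyBlock *pem.Block
	rest := bundle
	for {
		var block *pem.Block
		block, rest = pem.Decode(rest)
		if block == nil {
			break
		}
		if block.Type == "CERTIFICATE" {
			chainPEM = append(chainPEM, pem.EncodeToMemory(block)...)
		} else if keyBlock == nil {
			keyBlock = block // the first non-certificate block is the key
		}
	}
	if keyBlock == nil {
		return nil, nil, errors.New("no private key block in credential bundle")
	}
	return pem.EncodeToMemory(keyBlock), chainPEM, nil
}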

@@ -413,6 +452,7 @@ func (c *projectedVolumeUnmounter) TearDownAt(dir string) error {
	}

	c.plugin.deleteServiceAccountToken(c.podUID)

	return nil
}

@@ -421,5 +461,5 @@ func getVolumeSource(spec *volume.Spec) (*v1.ProjectedVolumeSource, bool, error)
 		return spec.Volume.Projected, spec.ReadOnly, nil
 	}

-	return nil, false, fmt.Errorf("Spec does not reference a projected volume type")
+	return nil, false, fmt.Errorf("spec does not reference a projected volume type")
 }

@@ -1038,6 +1038,109 @@ func TestCollectDataWithClusterTrustBundle(t *testing.T) {
	}
}

func TestCollectDataWithPodCertificate(t *testing.T) {
	testCases := []struct {
		name string

		source  v1.ProjectedVolumeSource
		bundles []runtime.Object

		fsUser  *int64
		fsGroup *int64

		wantPayload map[string]util.FileProjection
		wantErr     error
	}{
		{
			name: "credential bundle",
			source: v1.ProjectedVolumeSource{
				Sources: []v1.VolumeProjection{
					{
						PodCertificate: &v1.PodCertificateProjection{
							SignerName:           "example.com/foo",
							KeyType:              "ED25519",
							CredentialBundlePath: "credbundle.pem",
						},
					},
				},
				DefaultMode: ptr.To[int32](0644),
			},
			bundles: []runtime.Object{},
			wantPayload: map[string]util.FileProjection{
				"credbundle.pem": {
					Data: []byte("key\ncert\n"), // fake kubelet volume host is hardcoded to return this string.
					Mode: 0644,
				},
			},
		},
		{
			name: "key and cert bundle",
			source: v1.ProjectedVolumeSource{
				Sources: []v1.VolumeProjection{
					{
						PodCertificate: &v1.PodCertificateProjection{
							SignerName:           "example.com/foo",
							KeyType:              "ED25519",
							KeyPath:              "key.pem",
							CertificateChainPath: "certificates.pem",
						},
					},
				},
				DefaultMode: ptr.To[int32](0644),
			},
			bundles: []runtime.Object{},
			wantPayload: map[string]util.FileProjection{
				"key.pem": {
					Data: []byte("key\n"), // fake kubelet volume host is hardcoded to return this string.
					Mode: 0644,
				},
				"certificates.pem": {
					Data: []byte("cert\n"), // fake kubelet volume host is hardcoded to return this string.
					Mode: 0644,
				},
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			pod := &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "default",
					UID:       types.UID("test_pod_uid"),
				},
				Spec: v1.PodSpec{ServiceAccountName: "foo"},
			}

			client := fake.NewSimpleClientset(tc.bundles...)

			tempDir, host := newTestHost(t, client)
			defer os.RemoveAll(tempDir)

			var myVolumeMounter = projectedVolumeMounter{
				projectedVolume: &projectedVolume{
					sources: tc.source.Sources,
					podUID:  pod.UID,
					plugin: &projectedPlugin{
						host:   host,
						kvHost: host.(volume.KubeletVolumeHost),
					},
				},
				source: tc.source,
				pod:    pod,
			}

			gotPayload, err := myVolumeMounter.collectData(volume.MounterArgs{FsUser: tc.fsUser, FsGroup: tc.fsGroup})
			if err != nil {
				t.Fatalf("Unexpected failure making payload: %v", err)
			}
			if diff := cmp.Diff(tc.wantPayload, gotPayload); diff != "" {
				t.Fatalf("Bad payload; diff (-want +got)\n%s", diff)
			}
		})
	}
}

func newTestHost(t *testing.T, clientset clientset.Interface) (string, volume.VolumeHost) {
	tempDir, err := os.MkdirTemp("", "projected_volume_test.")
	if err != nil {

@@ -432,3 +432,7 @@ func (f *fakeKubeletVolumeHost) GetTrustAnchorsBySigner(signerName string, label

	return fullSet.Bytes(), nil
}

func (f *fakeKubeletVolumeHost) GetPodCertificateCredentialBundle(ctx context.Context, namespace, podName, podUID, volumeName string, sourceIndex int) ([]byte, []byte, error) {
	return []byte("key\n"), []byte("cert\n"), nil
}

@@ -21,14 +21,12 @@ import (
 	"fmt"
 	"io"

-	"k8s.io/klog/v2"
-
 	"k8s.io/apiserver/pkg/admission"
 	genericadmissioninit "k8s.io/apiserver/pkg/admission/initializer"
 	"k8s.io/apiserver/pkg/authorization/authorizer"

+	"k8s.io/klog/v2"
 	api "k8s.io/kubernetes/pkg/apis/certificates"
-	"k8s.io/kubernetes/plugin/pkg/admission/certificates"
+	"k8s.io/kubernetes/pkg/certauthorization"
 )

 // PluginName is a string with the name of the plugin

@@ -90,7 +88,7 @@ func (p *Plugin) Validate(ctx context.Context, a admission.Attributes, _ admissi
 		return admission.NewForbidden(a, fmt.Errorf("expected type CertificateSigningRequest, got: %T", a.GetOldObject()))
 	}

-	if !certificates.IsAuthorizedForSignerName(ctx, p.authz, a.GetUserInfo(), "approve", csr.Spec.SignerName) {
+	if !certauthorization.IsAuthorizedForSignerName(ctx, p.authz, a.GetUserInfo(), "approve", csr.Spec.SignerName) {
 		klog.V(4).Infof("user not permitted to approve CertificateSigningRequest %q with signerName %q", csr.Name, csr.Spec.SignerName)
 		return admission.NewForbidden(a, fmt.Errorf("user not permitted to approve requests with signerName %q", csr.Spec.SignerName))
 	}

@@ -28,9 +28,9 @@ import (
 	"k8s.io/klog/v2"
 	api "k8s.io/kubernetes/pkg/apis/certificates"
 	kapihelper "k8s.io/kubernetes/pkg/apis/core/helper"
+	"k8s.io/kubernetes/pkg/certauthorization"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/registry/rbac"
-	"k8s.io/kubernetes/plugin/pkg/admission/certificates"
 )

 const PluginName = "ClusterTrustBundleAttest"

@@ -116,7 +116,7 @@ func (p *Plugin) Validate(ctx context.Context, a admission.Attributes, _ admissi
 		return nil
 	}

-	if !certificates.IsAuthorizedForSignerName(ctx, p.authz, a.GetUserInfo(), "attest", newBundle.Spec.SignerName) {
+	if !certauthorization.IsAuthorizedForSignerName(ctx, p.authz, a.GetUserInfo(), "attest", newBundle.Spec.SignerName) {
 		klog.V(4).Infof("user not permitted to attest ClusterTrustBundle %q with signerName %q", newBundle.Name, newBundle.Spec.SignerName)
 		return admission.NewForbidden(a, fmt.Errorf("user not permitted to attest for signerName %q", newBundle.Spec.SignerName))
 	}

@@ -29,7 +29,7 @@ import (
 	genericadmissioninit "k8s.io/apiserver/pkg/admission/initializer"
 	"k8s.io/apiserver/pkg/authorization/authorizer"
 	api "k8s.io/kubernetes/pkg/apis/certificates"
-	"k8s.io/kubernetes/plugin/pkg/admission/certificates"
+	"k8s.io/kubernetes/pkg/certauthorization"
 )

 // PluginName is a string with the name of the plugin

@@ -97,7 +97,7 @@ func (p *Plugin) Validate(ctx context.Context, a admission.Attributes, o admissi
 		return nil
 	}

-	if !certificates.IsAuthorizedForSignerName(ctx, p.authz, a.GetUserInfo(), "sign", oldCSR.Spec.SignerName) {
+	if !certauthorization.IsAuthorizedForSignerName(ctx, p.authz, a.GetUserInfo(), "sign", oldCSR.Spec.SignerName) {
 		klog.V(4).Infof("user not permitted to sign CertificateSigningRequest %q with signerName %q", oldCSR.Name, oldCSR.Spec.SignerName)
 		return admission.NewForbidden(a, fmt.Errorf("user not permitted to sign requests with signerName %q", oldCSR.Spec.SignerName))
 	}
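
All three admission plugins now delegate the per-signer permission check to certauthorization.IsAuthorizedForSignerName. Following the long-standing CSR signer-authorization convention, that check presumably reduces to an attributes record like the sketch below (a verb check against the signers resource in the certificates.k8s.io group, with the signer name as the resource name); see pkg/certauthorization for the authoritative construction, which also handles the wildcard "<domain>/*" form:

// signerAttributes sketches the authorization attributes that a verb check
// ("approve", "attest", or "sign") against a signer name amounts to. The
// exact record built by pkg/certauthorization may differ in detail.
func signerAttributes(info user.Info, verb, signerName string) authorizer.Attributes {
	return authorizer.AttributesRecord{
		User:            info,
		Verb:            verb,
		APIGroup:        "certificates.k8s.io",
		Resource:        "signers",
		Name:            signerName,
		ResourceRequest: true,
	}
}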
|
||||
|
|
|
|||
|
|
@ -76,20 +76,23 @@ func NewPlugin(nodeIdentifier nodeidentifier.NodeIdentifier) *Plugin {
|
|||
// Plugin holds state for and implements the admission plugin.
|
||||
type Plugin struct {
|
||||
*admission.Handler
|
||||
nodeIdentifier nodeidentifier.NodeIdentifier
|
||||
podsGetter corev1lister.PodLister
|
||||
nodesGetter corev1lister.NodeLister
|
||||
csiDriverGetter storagelisters.CSIDriverLister
|
||||
pvcGetter corev1lister.PersistentVolumeClaimLister
|
||||
pvGetter corev1lister.PersistentVolumeLister
|
||||
csiTranslator csitrans.CSITranslator
|
||||
nodeIdentifier nodeidentifier.NodeIdentifier
|
||||
podsGetter corev1lister.PodLister
|
||||
nodesGetter corev1lister.NodeLister
|
||||
serviceAccountGetter corev1lister.ServiceAccountLister
|
||||
csiDriverGetter storagelisters.CSIDriverLister
|
||||
pvcGetter corev1lister.PersistentVolumeClaimLister
|
||||
pvGetter corev1lister.PersistentVolumeLister
|
||||
csiTranslator csitrans.CSITranslator
|
||||
|
||||
authz authorizer.Authorizer
|
||||
|
||||
inspectedFeatureGates bool
|
||||
expansionRecoveryEnabled bool
|
||||
dynamicResourceAllocationEnabled bool
|
||||
allowInsecureKubeletCertificateSigningRequests bool
|
||||
serviceAccountNodeAudienceRestriction bool
|
||||
podCertificateRequestsEnabled bool
|
||||
}
|
||||
|
||||
var (
|
||||
|
|
@ -105,6 +108,8 @@ func (p *Plugin) InspectFeatureGates(featureGates featuregate.FeatureGate) {
|
|||
p.dynamicResourceAllocationEnabled = featureGates.Enabled(features.DynamicResourceAllocation)
|
||||
p.allowInsecureKubeletCertificateSigningRequests = featureGates.Enabled(features.AllowInsecureKubeletCertificateSigningRequests)
|
||||
p.serviceAccountNodeAudienceRestriction = featureGates.Enabled(features.ServiceAccountNodeAudienceRestriction)
|
||||
p.podCertificateRequestsEnabled = featureGates.Enabled(features.PodCertificateRequest)
|
||||
p.inspectedFeatureGates = true
|
||||
}
|
||||
|
||||
// SetExternalKubeInformerFactory registers an informer factory into Plugin
|
||||
|
|
@ -117,6 +122,7 @@ func (p *Plugin) SetExternalKubeInformerFactory(f informers.SharedInformerFactor
|
|||
p.pvGetter = f.Core().V1().PersistentVolumes().Lister()
|
||||
p.csiTranslator = csitrans.New()
|
||||
}
|
||||
p.serviceAccountGetter = f.Core().V1().ServiceAccounts().Lister()
|
||||
}
|
||||
|
||||
// ValidateInitialization validates the Plugin was initialized properly
|
||||
|
|
@ -144,6 +150,12 @@ func (p *Plugin) ValidateInitialization() error {
|
|||
return fmt.Errorf("%s requires an authorizer", PluginName)
|
||||
}
|
||||
}
|
||||
if p.serviceAccountGetter == nil {
|
||||
return fmt.Errorf("%s requires a service account getter", PluginName)
|
||||
}
|
||||
if !p.inspectedFeatureGates {
|
||||
return fmt.Errorf("%s has not inspected feature gates", PluginName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -155,22 +167,23 @@ func (p *Plugin) SetAuthorizer(authz authorizer.Authorizer) {
|
|||
}
|
||||
|
||||
var (
|
||||
podResource = api.Resource("pods")
|
||||
nodeResource = api.Resource("nodes")
|
||||
pvcResource = api.Resource("persistentvolumeclaims")
|
||||
svcacctResource = api.Resource("serviceaccounts")
|
||||
leaseResource = coordapi.Resource("leases")
|
||||
csiNodeResource = storage.Resource("csinodes")
|
||||
resourceSliceResource = resource.Resource("resourceslices")
|
||||
csrResource = certapi.Resource("certificatesigningrequests")
|
||||
podResource = api.Resource("pods")
|
||||
nodeResource = api.Resource("nodes")
|
||||
pvcResource = api.Resource("persistentvolumeclaims")
|
||||
svcacctResource = api.Resource("serviceaccounts")
|
||||
leaseResource = coordapi.Resource("leases")
|
||||
csiNodeResource = storage.Resource("csinodes")
|
||||
resourceSliceResource = resource.Resource("resourceslices")
|
||||
csrResource = certapi.Resource("certificatesigningrequests")
|
||||
podCertificateRequestResource = certapi.Resource("podcertificaterequests")
|
||||
)
|
||||
|
||||
// Admit checks the admission policy and triggers corresponding actions
|
||||
func (p *Plugin) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error {
|
||||
nodeName, isNode := p.nodeIdentifier.NodeIdentity(a.GetUserInfo())
|
||||
|
||||
// Our job is just to restrict nodes
|
||||
if !isNode {
|
||||
// The calling user is not a node, so they should not be node-restricted.
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -179,6 +192,9 @@ func (p *Plugin) Admit(ctx context.Context, a admission.Attributes, o admission.
		return admission.NewForbidden(a, fmt.Errorf("could not determine node from user %q", a.GetUserInfo().GetName()))
	}

	// At this point, the caller has been affirmatively matched up to a node
	// name.

	// TODO: if node doesn't exist and this isn't a create node request, then reject.

	switch a.GetResource().GroupResource() {

@ -208,6 +224,9 @@ func (p *Plugin) Admit(ctx context.Context, a admission.Attributes, o admission.
	case svcacctResource:
		return p.admitServiceAccount(ctx, nodeName, a)

	case podCertificateRequestResource:
		return p.admitPodCertificateRequest(nodeName, a)

	case leaseResource:
		return p.admitLease(nodeName, a)

@ -773,6 +792,83 @@ func (p *Plugin) csiDriverHasAudience(driverName, audience string) (bool, error)
	return false, nil
}

func (p *Plugin) admitPodCertificateRequest(nodeName string, a admission.Attributes) error {
	if !p.podCertificateRequestsEnabled {
		return admission.NewForbidden(a, fmt.Errorf("PodCertificateRequest feature gate is disabled"))
	}

	if a.GetOperation() != admission.Create {
		return admission.NewForbidden(a, fmt.Errorf("unexpected operation %v", a.GetOperation()))
	}

	if len(a.GetSubresource()) != 0 {
		return admission.NewForbidden(a, fmt.Errorf("unexpected subresource %v", a.GetSubresource()))
	}

	namespace := a.GetNamespace()

	req, ok := a.GetObject().(*certapi.PodCertificateRequest)
	if !ok {
		return admission.NewForbidden(a, fmt.Errorf("unexpected type %T", a.GetObject()))
	}

	// Cross-check the node name and node UID with the node that made the request.
	if string(req.Spec.NodeName) != nodeName {
		return admission.NewForbidden(a, fmt.Errorf("PodCertificateRequest.Spec.NodeName=%q, which is not the requesting node %q", req.Spec.NodeName, nodeName))
	}
	node, err := p.nodesGetter.Get(string(req.Spec.NodeName))
	if apierrors.IsNotFound(err) {
		return fmt.Errorf("while retrieving node %q named in the PodCertificateRequest: %w", req.Spec.NodeName, err)
	}
	if err != nil {
		return admission.NewForbidden(a, fmt.Errorf("while retrieving node %q named in the PodCertificateRequest: %w", req.Spec.NodeName, err))
	}
	if node.ObjectMeta.UID != req.Spec.NodeUID {
		// Could be caused by informer lag. Don't return Forbidden to indicate that retries may succeed.
		return fmt.Errorf("PodCertificateRequest for pod %q names node UID %q, inconsistent with the running node (%q)", namespace+"/"+req.Spec.PodName, req.Spec.NodeUID, node.ObjectMeta.UID)
	}

	// Cross-check that the pod is a real pod, running on the node.
	pod, err := p.podsGetter.Pods(namespace).Get(req.Spec.PodName)
	if apierrors.IsNotFound(err) {
		return fmt.Errorf("while retrieving pod %q named in the PodCertificateRequest: %w", namespace+"/"+req.Spec.PodName, err)
	}
	if err != nil {
		return admission.NewForbidden(a, fmt.Errorf("while retrieving pod %q named in the PodCertificateRequest: %w", namespace+"/"+req.Spec.PodName, err))
	}
	if req.Spec.PodUID != pod.ObjectMeta.UID {
		// Could be caused by informer lag. Don't return Forbidden to indicate that retries may succeed.
		return fmt.Errorf("PodCertificateRequest for pod %q contains pod UID (%q) which differs from running pod %q", namespace+"/"+req.Spec.PodName, req.Spec.PodUID, string(pod.ObjectMeta.UID))
	}
	if pod.Spec.NodeName != string(req.Spec.NodeName) {
		return admission.NewForbidden(a, fmt.Errorf("pod %q is not running on node %q named in the PodCertificateRequest", namespace+"/"+req.Spec.PodName, req.Spec.NodeName))
	}

	// Mirror pods don't get pod certificates.
	if _, isMirror := pod.Annotations[api.MirrorPodAnnotationKey]; isMirror {
		return admission.NewForbidden(a, fmt.Errorf("pod %q is a mirror pod", namespace+"/"+req.Spec.PodName))
	}

	if req.Spec.ServiceAccountName != pod.Spec.ServiceAccountName {
		// We can outright forbid because this cannot be caused by informer lag (the UIDs match)
		return admission.NewForbidden(a, fmt.Errorf("PodCertificateRequest for pod %q contains serviceAccountName (%q) that differs from running pod (%q)", namespace+"/"+req.Spec.PodName, req.Spec.ServiceAccountName, pod.Spec.ServiceAccountName))
	}
	sa, err := p.serviceAccountGetter.ServiceAccounts(namespace).Get(req.Spec.ServiceAccountName)
	if apierrors.IsNotFound(err) {
		// Could be caused by informer lag. Don't return Forbidden to indicate that retries may succeed.
		return fmt.Errorf("while retrieving service account %q named in the PodCertificateRequest: %w", namespace+"/"+req.Spec.ServiceAccountName, err)
	}
	if err != nil {
		return admission.NewForbidden(a, fmt.Errorf("while retrieving service account %q named in the PodCertificateRequest: %w", namespace+"/"+req.Spec.ServiceAccountName, err))
	}
	if req.Spec.ServiceAccountUID != sa.ObjectMeta.UID {
		// Could be caused by informer lag. Don't return Forbidden to indicate that retries may succeed.
		return fmt.Errorf("PodCertificateRequest for pod %q names service account UID %q, which differs from the running service account (%q)", namespace+"/"+req.Spec.PodName, req.Spec.ServiceAccountUID, sa.ObjectMeta.UID)
	}

	return nil
}
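For orientation, here is a minimal sketch of the kubelet-side construction that these cross-checks mirror: the spec's identity fields are copied from the live pod, service account, and node objects, so every UID comparison above succeeds. The helper name is hypothetical; the spec fields are the certificates/v1alpha1 ones added later in this change, and PKIXPublicKey/ProofOfPossession are omitted here just as createPCRAttributes omits them in the tests below.

package example

import (
	certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
)

// buildPCRSpec (hypothetical) assembles a PodCertificateRequestSpec whose
// pod/serviceaccount/node names and UIDs mirror the live objects, which is
// exactly what admitPodCertificateRequest verifies.
func buildPCRSpec(pod *corev1.Pod, sa *corev1.ServiceAccount, node *corev1.Node, signer string) certsv1alpha1.PodCertificateRequestSpec {
	return certsv1alpha1.PodCertificateRequestSpec{
		SignerName:         signer,
		PodName:            pod.Name,
		PodUID:             pod.UID,
		ServiceAccountName: pod.Spec.ServiceAccountName,
		ServiceAccountUID:  sa.UID,
		NodeName:           types.NodeName(pod.Spec.NodeName),
		NodeUID:            node.UID,
		// PKIXPublicKey and ProofOfPossession are populated per the
		// v1alpha1 field documentation shown later in this diff.
	}
}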

func (p *Plugin) admitLease(nodeName string, a admission.Attributes) error {
	// the request must be against the system namespace reserved for node leases
	if a.GetNamespace() != api.NamespaceNodeLease {
@ -89,6 +89,18 @@ func makeTestPod(namespace, name, node string, mirror bool) (*api.Pod, *corev1.P
	return corePod, v1Pod
}

func makeTestServiceAccount(namespace, name string, uid types.UID) *corev1.ServiceAccount {
	sa := &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      name,
			UID:       uid,
		},
	}

	return sa
}

func withLabels(pod *api.Pod, labels map[string]string) *api.Pod {
	labeledPod := pod.DeepCopy()
	if labels == nil {

@ -220,17 +232,18 @@ func setForbiddenUpdateLabels(node *api.Node, value string) *api.Node {
}

type admitTestCase struct {
	name                 string
	podsGetter           corev1lister.PodLister
	nodesGetter          corev1lister.NodeLister
	serviceAccountGetter corev1lister.ServiceAccountLister
	csiDriverGetter      storagelisters.CSIDriverLister
	pvcGetter            corev1lister.PersistentVolumeClaimLister
	pvGetter             corev1lister.PersistentVolumeLister
	attributes           admission.Attributes
	features             featuregate.FeatureGate
	setupFunc            func(t *testing.T)
	err                  string
	authz                authorizer.Authorizer
}

func (a *admitTestCase) run(t *testing.T) {
@ -244,6 +257,7 @@ func (a *admitTestCase) run(t *testing.T) {
	}
	c.podsGetter = a.podsGetter
	c.nodesGetter = a.nodesGetter
	c.serviceAccountGetter = a.serviceAccountGetter
	c.csiDriverGetter = a.csiDriverGetter
	c.pvcGetter = a.pvcGetter
	c.pvGetter = a.pvGetter

@ -578,6 +592,40 @@ func Test_nodePlugin_Admit(t *testing.T) {
	claimpod, _ := makeTestPod("ns", "myclaimpod", "mynode", true)
	claimpod.Spec.ResourceClaims = []api.PodResourceClaim{{Name: "myclaim", ResourceClaimName: ptr.To("myexternalclaim")}}

	pcrServiceAccountIndex := cache.NewIndexer(cache.MetaNamespaceKeyFunc, nil)
	pcrServiceAccounts := corev1lister.NewServiceAccountLister(pcrServiceAccountIndex)

	pcrSA := makeTestServiceAccount("ns", "pcr-sa", "pcr-sa-uid")
	checkNilError(t, pcrServiceAccountIndex.Add(pcrSA))

	pcrNodeIndex := cache.NewIndexer(cache.MetaNamespaceKeyFunc, nil)
	pcrNodes := corev1lister.NewNodeLister(pcrNodeIndex)

	pcrNode1 := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pcr-node-1",
			UID:  "pcr-node-1-uid",
		},
	}
	checkNilError(t, pcrNodeIndex.Add(pcrNode1))
	pcrNode1UserInfo := &user.DefaultInfo{Name: "system:node:pcr-node-1", Groups: []string{"system:nodes"}}

	pcrNode2 := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pcr-node-2",
			UID:  "pcr-node-2-uid",
		},
	}
	checkNilError(t, pcrNodeIndex.Add(pcrNode2))
	pcrNode2UserInfo := &user.DefaultInfo{Name: "system:node:pcr-node-2", Groups: []string{"system:nodes"}}

	pcrPodIndex := cache.NewIndexer(cache.MetaNamespaceKeyFunc, nil)
	pcrPods := corev1lister.NewPodLister(pcrPodIndex)

	_, v1PodRequestingPCR := makeTestPod("ns", "pcrpod", pcrNode1.ObjectMeta.Name, false)
	v1PodRequestingPCR.Spec.ServiceAccountName = pcrSA.Name
	checkNilError(t, pcrPodIndex.Add(v1PodRequestingPCR))

	tests := []admitTestCase{
		// Mirror pods bound to us
		{
@ -1762,9 +1810,143 @@ func Test_nodePlugin_Admit(t *testing.T) {
			attributes: createCSRAttributes("system:node:mynode", certificatesapi.KubeletServingSignerName, false, privKey, mynode),
			err:        "unable to parse csr: asn1: syntax error: sequence truncated",
		},

		// PodCertificateRequest
		{
			name:                 "deny node1 create PCR when feature gate disabled",
			podsGetter:           pcrPods,
			serviceAccountGetter: pcrServiceAccounts,
			nodesGetter:          pcrNodes,
			attributes:           createPCRAttributes(v1PodRequestingPCR.ObjectMeta.Namespace, v1PodRequestingPCR.ObjectMeta.Name, v1PodRequestingPCR.ObjectMeta.UID, v1PodRequestingPCR.Spec.ServiceAccountName, "", pcrNode1.ObjectMeta.Name, pcrNode1.ObjectMeta.UID, pcrNode1UserInfo),
			err:                  "PodCertificateRequest feature gate is disabled",
		},
		{
			name:                 "allow node1 create PCR that references pod on node1",
			podsGetter:           pcrPods,
			serviceAccountGetter: pcrServiceAccounts,
			nodesGetter:          pcrNodes,
			attributes:           createPCRAttributes(v1PodRequestingPCR.ObjectMeta.Namespace, v1PodRequestingPCR.ObjectMeta.Name, v1PodRequestingPCR.ObjectMeta.UID, pcrSA.ObjectMeta.Name, pcrSA.ObjectMeta.UID, pcrNode1.ObjectMeta.Name, pcrNode1.ObjectMeta.UID, pcrNode1UserInfo),
			features:             feature.DefaultFeatureGate,
			setupFunc: func(t *testing.T) {
				t.Helper()
				featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodCertificateRequest, true)
			},
		},
		{
			name:                 "deny node2 create PCR that references pod on node1",
			podsGetter:           pcrPods,
			serviceAccountGetter: pcrServiceAccounts,
			nodesGetter:          pcrNodes,
			attributes:           createPCRAttributes(v1PodRequestingPCR.ObjectMeta.Namespace, v1PodRequestingPCR.ObjectMeta.Name, v1PodRequestingPCR.ObjectMeta.UID, pcrSA.ObjectMeta.Name, pcrSA.ObjectMeta.UID, pcrNode1.ObjectMeta.Name, pcrNode1.ObjectMeta.UID, pcrNode2UserInfo),
			features:             feature.DefaultFeatureGate,
			setupFunc: func(t *testing.T) {
				t.Helper()
				featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodCertificateRequest, true)
			},
			err: `PodCertificateRequest.Spec.NodeName="pcr-node-1", which is not the requesting node "pcr-node-2"`,
		},
		{
			name:                 "deny node1 create PCR that references nonexistent pod",
			podsGetter:           pcrPods,
			serviceAccountGetter: pcrServiceAccounts,
			nodesGetter:          pcrNodes,
			attributes:           createPCRAttributes("ns", "nonexistent-pod", "nonexistent-pod-uid", pcrSA.ObjectMeta.Name, pcrSA.ObjectMeta.UID, pcrNode1.ObjectMeta.Name, pcrNode1.ObjectMeta.UID, pcrNode1UserInfo),
			features:             feature.DefaultFeatureGate,
			setupFunc: func(t *testing.T) {
				t.Helper()
				featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodCertificateRequest, true)
			},
			err: `pod "nonexistent-pod" not found`,
		},
		{
			name:                 "deny node1 create PCR that references nonexistent sa",
			podsGetter:           pcrPods,
			serviceAccountGetter: pcrServiceAccounts,
			nodesGetter:          pcrNodes,
			attributes:           createPCRAttributes(v1PodRequestingPCR.ObjectMeta.Namespace, v1PodRequestingPCR.ObjectMeta.Name, v1PodRequestingPCR.ObjectMeta.UID, "nonexistent-sa", "nonexistent-sa-uid", pcrNode1.ObjectMeta.Name, pcrNode1.ObjectMeta.UID, pcrNode1UserInfo),
			features:             feature.DefaultFeatureGate,
			setupFunc: func(t *testing.T) {
				t.Helper()
				featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodCertificateRequest, true)
			},
			err: `PodCertificateRequest for pod "ns/pcrpod" contains serviceAccountName ("nonexistent-sa") that differs from running pod ("pcr-sa")`,
		},
		{
			name:                 "deny node1 create PCR that references nonexistent node",
			podsGetter:           pcrPods,
			serviceAccountGetter: pcrServiceAccounts,
			nodesGetter:          pcrNodes,
			attributes:           createPCRAttributes(v1PodRequestingPCR.ObjectMeta.Namespace, v1PodRequestingPCR.ObjectMeta.Name, v1PodRequestingPCR.ObjectMeta.UID, pcrSA.ObjectMeta.Name, pcrSA.ObjectMeta.UID, "nonexistent-node", "nonexistent-node-uid", pcrNode1UserInfo),
			features:             feature.DefaultFeatureGate,
			setupFunc: func(t *testing.T) {
				t.Helper()
				featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodCertificateRequest, true)
			},
			err: `PodCertificateRequest.Spec.NodeName="nonexistent-node", which is not the requesting node "pcr-node-1"`,
		},
		{
			name:                 "deny node1 create PCR with mismatched pod UID",
			podsGetter:           pcrPods,
			serviceAccountGetter: pcrServiceAccounts,
			nodesGetter:          pcrNodes,
			attributes:           createPCRAttributes(v1PodRequestingPCR.ObjectMeta.Namespace, v1PodRequestingPCR.ObjectMeta.Name, "wrong-uid", pcrSA.ObjectMeta.Name, pcrSA.ObjectMeta.UID, pcrNode1.ObjectMeta.Name, pcrNode1.ObjectMeta.UID, pcrNode1UserInfo),
			features:             feature.DefaultFeatureGate,
			setupFunc: func(t *testing.T) {
				t.Helper()
				featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodCertificateRequest, true)
			},
			err: `PodCertificateRequest for pod "ns/pcrpod" contains pod UID ("wrong-uid") which differs from running pod "pod-uid"`,
		},
		{
			name:                 "deny node1 create PCR with mismatched SA UID",
			podsGetter:           pcrPods,
			serviceAccountGetter: pcrServiceAccounts,
			nodesGetter:          pcrNodes,
			attributes: createPCRAttributes(
				v1PodRequestingPCR.ObjectMeta.Namespace,
				v1PodRequestingPCR.ObjectMeta.Name,
				v1PodRequestingPCR.ObjectMeta.UID,
				pcrSA.ObjectMeta.Name,
				"wrong-uid",
				pcrNode1.ObjectMeta.Name,
				pcrNode1.ObjectMeta.UID,
				pcrNode1UserInfo,
			),
			features: feature.DefaultFeatureGate,
			setupFunc: func(t *testing.T) {
				t.Helper()
				featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodCertificateRequest, true)
			},
			err: `PodCertificateRequest for pod "ns/pcrpod" names service account UID "wrong-uid", which differs from the running service account ("pcr-sa-uid")`,
		},
		{
			name:                 "deny node1 create PCR with mismatched node UID",
			podsGetter:           pcrPods,
			serviceAccountGetter: pcrServiceAccounts,
			nodesGetter:          pcrNodes,
			attributes: createPCRAttributes(
				v1PodRequestingPCR.ObjectMeta.Namespace,
				v1PodRequestingPCR.ObjectMeta.Name,
				v1PodRequestingPCR.ObjectMeta.UID,
				pcrSA.ObjectMeta.Name,
				pcrSA.ObjectMeta.UID,
				pcrNode1.ObjectMeta.Name,
				"wrong-uid",
				pcrNode1UserInfo,
			),
			features: feature.DefaultFeatureGate,
			setupFunc: func(t *testing.T) {
				t.Helper()
				featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodCertificateRequest, true)
			},
			err: `PodCertificateRequest for pod "ns/pcrpod" names node UID "wrong-uid", inconsistent with the running node ("pcr-node-1-uid")`,
		},
	}
	for _, tt := range tests {
		if tt.nodesGetter == nil {
			tt.nodesGetter = existingNodes
		}

		tt.run(t)
	}
}
@ -2150,7 +2332,43 @@ func createCSRAttributes(cn, signer string, validCsr bool, key any, user user.In
		},
	}
	return admission.NewAttributesRecord(csreq, nil, csrKind, "", "", csrResource, "", admission.Create, &metav1.CreateOptions{}, false, user)
}

func createPCRAttributes(namespace string, podName string, podUID types.UID, serviceAccountName string, serviceAccountUID types.UID, nodeName string, nodeUID types.UID, user user.Info) admission.Attributes {
	pcrResource := certificatesapi.Resource("podcertificaterequests").WithVersion("v1alpha1")
	pcrKind := certificatesapi.Kind("PodCertificateRequest").WithVersion("v1alpha1")

	pcr := &certificatesapi.PodCertificateRequest{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      "foo",
		},
		Spec: certificatesapi.PodCertificateRequestSpec{
			SignerName:         "example.com/foo",
			PodName:            podName,
			PodUID:             podUID,
			ServiceAccountName: serviceAccountName,
			ServiceAccountUID:  serviceAccountUID,
			NodeName:           types.NodeName(nodeName),
			NodeUID:            nodeUID,
			// Leave PKIXPublicKey and ProofOfPossession nil, since we're not
			// actually running validation.
		},
	}

	return admission.NewAttributesRecord(
		pcr,
		nil,
		pcrKind,
		pcr.ObjectMeta.Namespace,
		pcr.ObjectMeta.Name,
		pcrResource,
		"",
		admission.Create,
		&metav1.CreateOptions{},
		false,
		user,
	)
}

func TestAdmitResourceSlice(t *testing.T) {
@ -21,6 +21,7 @@ import (
	"sync"
	"time"

	certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/component-helpers/storage/ephemeral"
	"k8s.io/dynamic-resource-allocation/resourceclaim"

@ -125,6 +126,7 @@ const (
	secretVertexType
	vaVertexType
	serviceAccountVertexType
	pcrVertexType
)

var vertexTypes = map[vertexType]string{

@ -138,6 +140,7 @@ var vertexTypes = map[vertexType]string{
	secretVertexType:         "secret",
	vaVertexType:             "volumeattachment",
	serviceAccountVertexType: "serviceAccount",
	pcrVertexType:            "podcertificaterequest",
}

// vertexTypeWithAuthoritativeIndex indicates which types of vertices can hold

@ -152,6 +155,7 @@ var vertexTypeWithAuthoritativeIndex = map[vertexType]bool{
	resourceClaimVertexType:  true,
	vaVertexType:             true,
	serviceAccountVertexType: true,
	pcrVertexType:            true,
}

// must be called under a write lock

@ -374,10 +378,9 @@ func (g *Graph) AddPod(pod *corev1.Pod) {
		return
	}

	// The pod unconditionally gets access to the pod's service account. In the
	// future, this access could be restricted based on whether or not the pod
	// actually mounts a service account token, or has a podcertificate volume.
	if len(pod.Spec.ServiceAccountName) > 0 {
		serviceAccountVertex := g.getOrCreateVertexLocked(serviceAccountVertexType, pod.Namespace, pod.Spec.ServiceAccountName)
		// Edge adds must be handled by addEdgeLocked instead of direct g.graph.SetEdge calls.
@ -458,6 +461,43 @@ func (g *Graph) DeletePod(name, namespace string) {
	g.deleteVertexLocked(podVertexType, namespace, name)
}

// AddPodCertificateRequest adds a PodCertificateRequest to the graph.
//
// PCRs technically have two valid edges:
//
// * PCR -> Pod (-> Node)
//
// * PCR -> Node
//
// We only add the direct PCR -> Node edge, since that is enough to perform the
// authorization, and it's a shorter graph traversal. The noderestriction
// admission plugin ensures that all PCRs created have a valid node,
// serviceaccount, and pod combination that actually exists in the cluster.
func (g *Graph) AddPodCertificateRequest(pcr *certsv1alpha1.PodCertificateRequest) {
	start := time.Now()
	defer func() {
		graphActionsDuration.WithLabelValues("AddPodCertificateRequest").Observe(time.Since(start).Seconds())
	}()
	g.lock.Lock()
	defer g.lock.Unlock()

	g.deleteVertexLocked(pcrVertexType, pcr.ObjectMeta.Namespace, pcr.ObjectMeta.Name)
	pcrVertex := g.getOrCreateVertexLocked(pcrVertexType, pcr.ObjectMeta.Namespace, pcr.ObjectMeta.Name)
	nodeVertex := g.getOrCreateVertexLocked(nodeVertexType, "", string(pcr.Spec.NodeName))
	g.addEdgeLocked(pcrVertex, nodeVertex, nodeVertex)
}

// DeletePodCertificateRequest removes a PodCertificateRequest from the graph.
func (g *Graph) DeletePodCertificateRequest(pcr *certsv1alpha1.PodCertificateRequest) {
	start := time.Now()
	defer func() {
		graphActionsDuration.WithLabelValues("DeletePodCertificateRequest").Observe(time.Since(start).Seconds())
	}()
	g.lock.Lock()
	defer g.lock.Unlock()
	g.deleteVertexLocked(pcrVertexType, pcr.ObjectMeta.Namespace, pcr.ObjectMeta.Name)
}

// AddPV sets up edges for the following relationships:
//
// secret -> pv
@ -21,10 +21,12 @@ import (

	"k8s.io/klog/v2"

	certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	resourceapi "k8s.io/api/resource/v1beta1"
	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	certsv1alpha1informers "k8s.io/client-go/informers/certificates/v1alpha1"
	corev1informers "k8s.io/client-go/informers/core/v1"
	resourceinformers "k8s.io/client-go/informers/resource/v1beta1"
	storageinformers "k8s.io/client-go/informers/storage/v1"

@ -43,6 +45,7 @@ func AddGraphEventHandlers(
	pvs corev1informers.PersistentVolumeInformer,
	attachments storageinformers.VolumeAttachmentInformer,
	slices resourceinformers.ResourceSliceInformer,
	pcrs certsv1alpha1informers.PodCertificateRequestInformer,
) {
	g := &graphPopulator{
		graph: graph,

@ -79,6 +82,15 @@
		synced = append(synced, sliceHandler.HasSynced)
	}

	if pcrs != nil {
		pcrHandler, _ := pcrs.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
			AddFunc:    g.addPCR,
			UpdateFunc: nil, // Not needed, spec fields are immutable.
			DeleteFunc: g.deletePCR,
		})
		synced = append(synced, pcrHandler.HasSynced)
	}

	go cache.WaitForNamedCacheSync("node_authorizer", wait.NeverStop, synced...)
}
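As a usage sketch (not one of this change's actual call sites), the new informer argument would come from the generated client-go informer factory. The accessor path below assumes the generated informer code added by this commit; the remaining AddGraphEventHandlers arguments are elided because they are unchanged.

package example

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// wirePCRInformer (hypothetical) shows where the PodCertificateRequest
// informer comes from: the shared informer factory's certificates/v1alpha1
// group. The resulting informer is passed to AddGraphEventHandlers as the
// new final parameter, alongside the existing node/pod/PV/attachment/slice
// informers.
func wirePCRInformer(client kubernetes.Interface) {
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	pcrInformer := factory.Certificates().V1alpha1().PodCertificateRequests()
	_ = pcrInformer // handed to AddGraphEventHandlers by the caller
}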

@ -201,3 +213,24 @@ func (g *graphPopulator) deleteResourceSlice(obj interface{}) {
	}
	g.graph.DeleteResourceSlice(slice.Name)
}

func (g *graphPopulator) addPCR(obj any) {
	pcr, ok := obj.(*certsv1alpha1.PodCertificateRequest)
	if !ok {
		klog.Infof("unexpected type %T", obj)
		return
	}
	g.graph.AddPodCertificateRequest(pcr)
}

func (g *graphPopulator) deletePCR(obj any) {
	if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
		obj = tombstone.Obj
	}
	pcr, ok := obj.(*certsv1alpha1.PodCertificateRequest)
	if !ok {
		klog.Infof("unexpected type %T", obj)
		return
	}
	g.graph.DeletePodCertificateRequest(pcr)
}
@ -23,8 +23,9 @@ import (
	"sort"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/stretchr/testify/assert"

	certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

@ -461,10 +462,8 @@ func TestIndex2(t *testing.T) {
			sort.Strings(sortedTo)
			actual[toString(g, node.ID())] = sortedTo
		}
		if diff := cmp.Diff(actual, expect); diff != "" {
			t.Errorf("Bad graph; diff (-got +want):\n%s", diff)
		}
	}
	expectIndex := func(t *testing.T, g *Graph, expect map[string][]string) {

@ -478,10 +477,8 @@ func TestIndex2(t *testing.T) {
			sort.Strings(sortedValues)
			actual[toString(g, from)] = sortedValues
		}
		if diff := cmp.Diff(actual, expect); diff != "" {
			t.Errorf("Bad index; diff (-got +want):\n%s", diff)
		}
	}

@ -889,6 +886,63 @@ func TestIndex2(t *testing.T) {
			},
			expectedIndex: map[string][]string{},
		},
		{
			desc:          "podcertificaterequest adding",
			startingGraph: NewTestGraph(),
			graphTransformer: func(g *Graph) {
				g.AddPodCertificateRequest(pcr("foo", "pcr1", "pod1", "sa1", "node1"))
				g.AddPodCertificateRequest(pcr("foo", "pcr2", "pod1", "sa1", "node1"))
				g.AddPodCertificateRequest(pcr("foo", "pcr3", "pod2", "sa2", "node1"))
				g.AddPodCertificateRequest(pcr("foo", "pcr4", "pod4", "sa4", "node2"))
			},
			expectedGraph: map[string][]string{
				"node:node1":                     {},
				"node:node2":                     {},
				"podcertificaterequest:foo/pcr1": {"node:node1"},
				"podcertificaterequest:foo/pcr2": {"node:node1"},
				"podcertificaterequest:foo/pcr3": {"node:node1"},
				"podcertificaterequest:foo/pcr4": {"node:node2"},
			},
			expectedIndex: map[string][]string{},
		},
		{
			desc: "podcertificaterequest deleting",
			startingGraph: func() *Graph {
				g := NewTestGraph()
				g.AddPodCertificateRequest(pcr("foo", "pcr1", "pod1", "sa1", "node1"))
				g.AddPodCertificateRequest(pcr("foo", "pcr2", "pod1", "sa1", "node1"))
				g.AddPodCertificateRequest(pcr("foo", "pcr3", "pod2", "sa2", "node1"))
				g.AddPodCertificateRequest(pcr("foo", "pcr4", "pod4", "sa4", "node2"))
				return g
			}(),
			graphTransformer: func(g *Graph) {
				g.DeletePodCertificateRequest(pcr("foo", "pcr3", "", "", ""))
				g.DeletePodCertificateRequest(pcr("foo", "pcr4", "", "", ""))
			},
			expectedGraph: map[string][]string{
				"node:node1":                     {},
				"podcertificaterequest:foo/pcr1": {"node:node1"},
				"podcertificaterequest:foo/pcr2": {"node:node1"},
			},
			expectedIndex: map[string][]string{},
		},
		{
			desc: "podcertificaterequest deleting (check namespace/name ordering)",
			startingGraph: func() *Graph {
				g := NewTestGraph()
				g.AddPodCertificateRequest(pcr("foo", "bar", "pod1", "sa1", "node1"))
				g.AddPodCertificateRequest(pcr("bar", "foo", "pod2", "sa2", "node2"))
				return g
			}(),
			graphTransformer: func(g *Graph) {
				g.DeletePodCertificateRequest(pcr("foo", "bar", "", "", ""))
			},
			expectedGraph: map[string][]string{
				"node:node2":                    {},
				"podcertificaterequest:bar/foo": {"node:node2"},
			},
			expectedIndex: map[string][]string{},
		},
	}

	for _, tc := range cases {

@ -899,3 +953,19 @@ func TestIndex2(t *testing.T) {
		})
	}
}

func pcr(namespace, name, podName, saName, nodeName string) *certsv1alpha1.PodCertificateRequest {
	pcr := &certsv1alpha1.PodCertificateRequest{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      name,
			UID:       types.UID(fmt.Sprintf("pcr%suid", name)),
		},
		Spec: certsv1alpha1.PodCertificateRequestSpec{
			PodName:            podName,
			ServiceAccountName: saName,
			NodeName:           types.NodeName(nodeName),
		},
	}
	return pcr
}
@ -29,6 +29,7 @@ import (
	"k8s.io/apiserver/pkg/authorization/authorizer"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/component-base/featuregate"
	certsapi "k8s.io/kubernetes/pkg/apis/certificates"
	coordapi "k8s.io/kubernetes/pkg/apis/coordination"
	api "k8s.io/kubernetes/pkg/apis/core"
	resourceapi "k8s.io/kubernetes/pkg/apis/resource"

@ -52,6 +53,7 @@
// node <- pod <- pvc <- pv
// node <- pod <- pvc <- pv <- secret
// node <- pod <- ResourceClaim
// node <- pcr
// 4. If a request is for a resourceslice, then authorize access if there is an
// edge from the existing slice object to the node, which is the case if the
// existing object has the node in its NodeName field. For create, the access gets

@ -93,6 +95,7 @@ var (
	svcAcctResource  = api.Resource("serviceaccounts")
	leaseResource    = coordapi.Resource("leases")
	csiNodeResource  = storageapi.Resource("csinodes")
	pcrResource      = certsapi.Resource("podcertificaterequests")
)

func (r *NodeAuthorizer) RulesFor(ctx context.Context, user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) {

@ -150,6 +153,10 @@ func (r *NodeAuthorizer) Authorize(ctx context.Context, attrs authorizer.Attribu
			if r.features.Enabled(features.AuthorizeNodeWithSelectors) {
				return r.authorizePod(nodeName, attrs)
			}
		case pcrResource:
			if r.features.Enabled(features.PodCertificateRequest) && r.features.Enabled(features.AuthorizeNodeWithSelectors) {
				return r.authorizePodCertificateRequest(nodeName, attrs)
			}
		}
	}
@ -232,13 +239,13 @@ func (r *NodeAuthorizer) authorize(nodeName string, startingType vertexType, att
}

// authorizeServiceAccount authorizes
// - "get" requests to serviceaccounts when KubeletServiceAccountTokenForCredentialProviders or PodCertificateRequest features are enabled
// - "create" requests to serviceaccounts 'token' subresource of pods running on a node
func (r *NodeAuthorizer) authorizeServiceAccount(nodeName string, attrs authorizer.Attributes) (authorizer.Decision, string, error) {
	verb := attrs.GetVerb()

	if verb == "get" && len(attrs.GetSubresource()) == 0 {
		if !(r.features.Enabled(features.KubeletServiceAccountTokenForCredentialProviders) || r.features.Enabled(features.PodCertificateRequest)) {
			klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
			return authorizer.DecisionNoOpinion, "not allowed to get service accounts", nil
		}
@ -376,6 +383,34 @@ func (r *NodeAuthorizer) authorizeResourceSlice(nodeName string, attrs authorize
	}
}

func (r *NodeAuthorizer) authorizePodCertificateRequest(nodeName string, attrs authorizer.Attributes) (authorizer.Decision, string, error) {
	if len(attrs.GetSubresource()) != 0 {
		return authorizer.DecisionNoOpinion, "nodes may not access the status subresource of PodCertificateRequests", nil
	}

	switch attrs.GetVerb() {
	case "create":
		// Creates are further restricted by the noderestriction admission plugin.
		return authorizer.DecisionAllow, "", nil
	case "get":
		return r.authorize(nodeName, pcrVertexType, attrs)
	case "list", "watch":
		// Allow requests that have a field selector restricting them to this node.
		reqs, _ := attrs.GetFieldSelector()
		for _, req := range reqs {
			if req.Field == "spec.nodeName" && req.Operator == selection.Equals && req.Value == nodeName {
				return authorizer.DecisionAllow, "", nil
			}
		}
		// Deny otherwise.
		klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
		return authorizer.DecisionNoOpinion, "can only list/watch podcertificaterequests with nodeName field selector", nil
	}

	klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
	return authorizer.DecisionNoOpinion, fmt.Sprintf("nodes may not %s podcertificaterequests", attrs.GetVerb()), nil
}
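To make the list/watch rule concrete, here is a hedged sketch of the kind of node-scoped list call that this branch allows. It assumes the generated certificates/v1alpha1 clientset from this commit; the node name is illustrative. Without the spec.nodeName field selector, the same call would get DecisionNoOpinion.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
)

// listOwnPCRs lists PodCertificateRequests restricted to a single node via
// the spec.nodeName field selector, which is exactly the shape
// authorizePodCertificateRequest requires before allowing list or watch.
func listOwnPCRs(ctx context.Context, client kubernetes.Interface, nodeName string) error {
	sel := fields.OneTermEqualSelector("spec.nodeName", nodeName).String()
	_, err := client.CertificatesV1alpha1().PodCertificateRequests(metav1.NamespaceAll).List(ctx, metav1.ListOptions{
		FieldSelector: sel,
	})
	return err
}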

// authorizeNode authorizes node requests to Node API objects
func (r *NodeAuthorizer) authorizeNode(nodeName string, attrs authorizer.Attributes) (authorizer.Decision, string, error) {
	switch attrs.GetSubresource() {
@ -27,11 +27,13 @@ import (
	"testing"
	"time"

	certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	resourceapi "k8s.io/api/resource/v1beta1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/version"
	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/authorization/authorizer"

@ -62,9 +64,10 @@ func TestNodeAuthorizer(t *testing.T) {
		uniqueResourceClaimTemplatesPerPod:          1,
		uniqueResourceClaimTemplatesWithClaimPerPod: 1,
		nodeResourceSlicesPerNode:                   2,
		podCertificateRequestsPerPod:                2,
	}
	nodes, pods, pvs, attachments, slices, pcrs := generate(opts)
	populate(g, nodes, pods, pvs, attachments, slices, pcrs)

	identifier := nodeidentifier.NewDefaultNodeIdentifier()
	authz := NewAuthorizer(g, identifier, bootstrappolicy.NodeRules())

@ -97,6 +100,14 @@ func TestNodeAuthorizer(t *testing.T) {
		return f
	}

	podCertificateProjectionEnabled := func(t testing.TB) featuregate.FeatureGate {
		f := utilfeature.DefaultFeatureGate.DeepCopy()
		featuregatetesting.SetFeatureGateDuringTest(t, f, genericfeatures.AuthorizeWithSelectors, true)
		featuregatetesting.SetFeatureGateDuringTest(t, f, features.AuthorizeNodeWithSelectors, true)
		featuregatetesting.SetFeatureGateDuringTest(t, f, features.PodCertificateRequest, true)
		return f
	}

	featureVariants := []struct {
		suffix   string
		features func(t testing.TB) featuregate.FeatureGate

@ -267,6 +278,90 @@ func TestNodeAuthorizer(t *testing.T) {
			features:     serviceAccountTokenForCredentialProvidersDisabled,
			expectReason: "can only create tokens for individual service accounts",
		},
		{
			name:     "allowed svcacct token create when PodCertificateProjection is enabled",
			attrs:    authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "create", Resource: "serviceaccounts", Subresource: "token", Name: "svcacct0-node0", Namespace: "ns0"},
			expect:   authorizer.DecisionAllow,
			features: podCertificateProjectionEnabled,
		},
		{
			name:     "allowed svcacct get when PodCertificateProjection is enabled",
			attrs:    authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "serviceaccounts", Name: "svcacct0-node0", Namespace: "ns0"},
			expect:   authorizer.DecisionAllow,
			features: podCertificateProjectionEnabled,
		},
		{
			name:   "disallowed pcr create when PodCertificateProjection is disabled",
			attrs:  authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "create", APIGroup: "certificates.k8s.io", Resource: "podcertificaterequests", Name: "pcr0-pod0-node0", Namespace: "ns0"},
			expect: authorizer.DecisionNoOpinion,
		},
		{
			name:     "allowed pcr create when PodCertificateProjection is enabled",
			attrs:    authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "create", APIGroup: "certificates.k8s.io", Resource: "podcertificaterequests", Name: "pcr0-pod0-node0", Namespace: "ns0"},
			expect:   authorizer.DecisionAllow,
			features: podCertificateProjectionEnabled,
		},
		{
			name:   "disallowed pcr get when PodCertificateProjection is disabled",
			attrs:  authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", APIGroup: "certificates.k8s.io", Resource: "podcertificaterequests", Name: "pcr0-pod0-node0", Namespace: "ns0"},
			expect: authorizer.DecisionNoOpinion,
		},
		{
			name:     "allowed pcr get when PodCertificateProjection is enabled",
			attrs:    authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", APIGroup: "certificates.k8s.io", Resource: "podcertificaterequests", Name: "pcr0-pod0-node0", Namespace: "ns0"},
			expect:   authorizer.DecisionAllow,
			features: podCertificateProjectionEnabled,
		},
		{
			name:   "disallowed pcr list when PodCertificateProjection is disabled",
			attrs:  authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "list", APIGroup: "certificates.k8s.io", Resource: "podcertificaterequests", Name: "pcr0-pod0-node0", Namespace: "ns0"},
			expect: authorizer.DecisionNoOpinion,
		},
		{
			name:         "disallowed pcr list (un-filtered) when PodCertificateProjection is enabled",
			attrs:        authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "list", APIGroup: "certificates.k8s.io", Resource: "podcertificaterequests", Name: "pcr0-pod0-node0", Namespace: "ns0"},
			expect:       authorizer.DecisionNoOpinion,
			expectReason: "can only list/watch podcertificaterequests with nodeName field selector",
			features:     podCertificateProjectionEnabled,
		},
		{
			name:         "disallowed pcr list (filtered to other node) when PodCertificateProjection is enabled",
			attrs:        authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "list", APIGroup: "certificates.k8s.io", Resource: "podcertificaterequests", Name: "pcr0-pod0-node0", Namespace: "ns0", FieldSelectorRequirements: fields.Requirements{{Field: "spec.nodeName", Operator: "=", Value: "othernode"}}},
			expect:       authorizer.DecisionNoOpinion,
			expectReason: "can only list/watch podcertificaterequests with nodeName field selector",
			features:     podCertificateProjectionEnabled,
		},
		{
			name:     "allowed pcr list (filtered to correct node) when PodCertificateProjection is enabled",
			attrs:    authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "list", APIGroup: "certificates.k8s.io", Resource: "podcertificaterequests", Name: "pcr0-pod0-node0", Namespace: "ns0", FieldSelectorRequirements: fields.Requirements{{Field: "spec.nodeName", Operator: "=", Value: "node0"}}},
			expect:   authorizer.DecisionAllow,
			features: podCertificateProjectionEnabled,
		},
		{
			name:   "disallowed pcr watch when PodCertificateProjection is disabled",
			attrs:  authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "watch", APIGroup: "certificates.k8s.io", Resource: "podcertificaterequests", Name: "pcr0-pod0-node0", Namespace: "ns0"},
			expect: authorizer.DecisionNoOpinion,
		},
		{
			name:         "disallowed pcr watch (un-filtered) when PodCertificateProjection is enabled",
			attrs:        authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "watch", APIGroup: "certificates.k8s.io", Resource: "podcertificaterequests", Name: "pcr0-pod0-node0", Namespace: "ns0"},
			expect:       authorizer.DecisionNoOpinion,
			expectReason: "can only list/watch podcertificaterequests with nodeName field selector",
			features:     podCertificateProjectionEnabled,
		},
		{
			name:         "disallowed pcr watch (filtered to other node) when PodCertificateProjection is enabled",
			attrs:        authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "watch", APIGroup: "certificates.k8s.io", Resource: "podcertificaterequests", Name: "pcr0-pod0-node0", Namespace: "ns0", FieldSelectorRequirements: fields.Requirements{{Field: "spec.nodeName", Operator: "=", Value: "othernode"}}},
			expect:       authorizer.DecisionNoOpinion,
			expectReason: "can only list/watch podcertificaterequests with nodeName field selector",
			features:     podCertificateProjectionEnabled,
		},
		{
			name:     "allowed pcr watch (filtered to correct node) when PodCertificateProjection is enabled",
			attrs:    authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "watch", APIGroup: "certificates.k8s.io", Resource: "podcertificaterequests", Name: "pcr0-pod0-node0", Namespace: "ns0", FieldSelectorRequirements: fields.Requirements{{Field: "spec.nodeName", Operator: "=", Value: "node0"}}},
			expect:   authorizer.DecisionAllow,
			features: podCertificateProjectionEnabled,
		},
		{
			name:  "disallowed get lease in namespace other than kube-node-lease - feature enabled",
			attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "leases", APIGroup: "coordination.k8s.io", Name: "node0", Namespace: "foo"},
@ -791,7 +886,11 @@ func TestNodeAuthorizer(t *testing.T) {
		} else {
			t.Run(tc.name, func(t *testing.T) {
				authz.features = tc.features(t)
				decision, reason, err := authz.Authorize(context.Background(), tc.attrs)
				if err != nil {
					t.Fatalf("Unexpected error calling Authorize: %v", err)
				}

				if decision != tc.expect {
					t.Errorf("expected %v, got %v (%s)", tc.expect, decision, reason)
				}
@ -1061,6 +1160,8 @@ type sampleDataOpts struct {
	uniqueResourceClaimTemplatesWithClaimPerPod int

	nodeResourceSlicesPerNode int

	podCertificateRequestsPerPod int
}

func mustParseFields(s string) fields.Requirements {

@ -1085,12 +1186,12 @@ func BenchmarkPopulationAllocation(b *testing.B) {
		uniquePVCsPerPod: 1,
	}

	nodes, pods, pvs, attachments, slices, pcrs := generate(opts)
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		g := NewGraph()
		populate(g, nodes, pods, pvs, attachments, slices, pcrs)
	}
}

@ -1116,14 +1217,14 @@ func BenchmarkPopulationRetention(b *testing.B) {
		uniquePVCsPerPod: 1,
	}

	nodes, pods, pvs, attachments, slices, pcrs := generate(opts)
	// Garbage collect before the first iteration
	runtime.GC()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		g := NewGraph()
		populate(g, nodes, pods, pvs, attachments, slices, pcrs)

		if i == 0 {
			f, _ := os.Create("BenchmarkPopulationRetention.profile")

@ -1154,9 +1255,9 @@ func BenchmarkWriteIndexMaintenance(b *testing.B) {
		sharedPVCsPerPod: 0,
		uniquePVCsPerPod: 1,
	}
	nodes, pods, pvs, attachments, slices, pcrs := generate(opts)
	g := NewGraph()
	populate(g, nodes, pods, pvs, attachments, slices, pcrs)
	// Garbage collect before the first iteration
	runtime.GC()
	b.ResetTimer()

@ -1180,7 +1281,7 @@ func BenchmarkUnauthorizedRequests(b *testing.B) {
		podsPerNode:            1,
		sharedConfigMapsPerPod: 1,
	}
	nodes, pods, pvs, attachments, slices, pcrs := generate(opts)

	// Create an additional Node that doesn't have access to a shared ConfigMap
	// that all the other Nodes are authorized to read.

@ -1195,7 +1296,7 @@ func BenchmarkUnauthorizedRequests(b *testing.B) {
	pods = append(pods, pod)

	g := NewGraph()
	populate(g, nodes, pods, pvs, attachments, slices, pcrs)

	identifier := nodeidentifier.NewDefaultNodeIdentifier()
	authz := NewAuthorizer(g, identifier, bootstrappolicy.NodeRules())

@ -1232,8 +1333,8 @@ func BenchmarkAuthorization(b *testing.B) {
		sharedPVCsPerPod: 0,
		uniquePVCsPerPod: 1,
	}
	nodes, pods, pvs, attachments, slices, pcrs := generate(opts)
	populate(g, nodes, pods, pvs, attachments, slices, pcrs)

	identifier := nodeidentifier.NewDefaultNodeIdentifier()
	authz := NewAuthorizer(g, identifier, bootstrappolicy.NodeRules())

@ -1298,7 +1399,7 @@ func BenchmarkAuthorization(b *testing.B) {
		},
	}

	podToAdd, _, _ := generatePod("testwrite", "ns0", "node0", "default", opts, rand.Perm)

	b.ResetTimer()
	for _, testWriteContention := range []bool{false, true} {

@ -1382,7 +1483,7 @@ func BenchmarkAuthorization(b *testing.B) {
	}
}

func populate(graph *Graph, nodes []*corev1.Node, pods []*corev1.Pod, pvs []*corev1.PersistentVolume, attachments []*storagev1.VolumeAttachment, slices []*resourceapi.ResourceSlice, pcrs []*certsv1alpha1.PodCertificateRequest) {
	p := &graphPopulator{}
	p.graph = graph
	for _, pod := range pods {

@ -1397,6 +1498,9 @@ func populate(graph *Graph, nodes []*corev1.Node, pods []*corev1.Pod, pvs []*cor
	for _, slice := range slices {
		p.addResourceSlice(slice)
	}
	for _, pcr := range pcrs {
		p.addPCR(pcr)
	}
}

func randomSubset(a, b int, randPerm func(int) []int) []int {
@ -1410,12 +1514,13 @@ func randomSubset(a, b int, randPerm func(int) []int) []int {
// the secret/configmap/pvc/node references in the pod and pv objects are named to indicate the connections between the objects.
// for example, secret0-pod0-node0 is a secret referenced by pod0 which is bound to node0.
// when populated into the graph, the node authorizer should allow node0 to access that secret, but not node1.
func generate(opts *sampleDataOpts) ([]*corev1.Node, []*corev1.Pod, []*corev1.PersistentVolume, []*storagev1.VolumeAttachment, []*resourceapi.ResourceSlice, []*certsv1alpha1.PodCertificateRequest) {
	nodes := make([]*corev1.Node, 0, opts.nodes)
	pods := make([]*corev1.Pod, 0, opts.nodes*opts.podsPerNode)
	pvs := make([]*corev1.PersistentVolume, 0, (opts.nodes*opts.podsPerNode*opts.uniquePVCsPerPod)+(opts.sharedPVCsPerPod*opts.namespaces))
	attachments := make([]*storagev1.VolumeAttachment, 0, opts.nodes*opts.attachmentsPerNode)
	slices := make([]*resourceapi.ResourceSlice, 0, opts.nodes*opts.nodeResourceSlicesPerNode)
	pcrs := make([]*certsv1alpha1.PodCertificateRequest, 0, opts.nodes*opts.podsPerNode*opts.podCertificateRequestsPerPod)

	r := rand.New(rand.NewSource(12345))

@ -1426,9 +1531,10 @@ func generate(opts *sampleDataOpts) ([]*corev1.Node, []*corev1.Pod, []*corev1.Pe
		namespace := fmt.Sprintf("ns%d", p%opts.namespaces)
		svcAccountName := fmt.Sprintf("svcacct%d-%s", p, nodeName)

		pod, podPVs, podPCRs := generatePod(name, namespace, nodeName, svcAccountName, opts, r.Perm)
		pods = append(pods, pod)
		pvs = append(pvs, podPVs...)
		pcrs = append(pcrs, podPCRs...)
	}
	for a := 0; a < opts.attachmentsPerNode; a++ {
		attachment := &storagev1.VolumeAttachment{}

@ -1453,11 +1559,12 @@ func generate(opts *sampleDataOpts) ([]*corev1.Node, []*corev1.Pod, []*corev1.Pe
			slices = append(slices, slice)
		}
	}
	return nodes, pods, pvs, attachments, slices, pcrs
}

func generatePod(name, namespace, nodeName, svcAccountName string, opts *sampleDataOpts, randPerm func(int) []int) (*corev1.Pod, []*corev1.PersistentVolume, []*certsv1alpha1.PodCertificateRequest) {
	pvs := make([]*corev1.PersistentVolume, 0, opts.uniquePVCsPerPod+opts.sharedPVCsPerPod)
	pcrs := make([]*certsv1alpha1.PodCertificateRequest, 0, opts.podCertificateRequestsPerPod)

	pod := &corev1.Pod{}
	pod.Name = name

@ -1544,5 +1651,21 @@ func generatePod(name, namespace, nodeName, svcAccountName string, opts *sampleD
		}})
	}

	for i := 0; i < opts.podCertificateRequestsPerPod; i++ {
		pcr := &certsv1alpha1.PodCertificateRequest{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: pod.ObjectMeta.Namespace,
				Name:      fmt.Sprintf("pcr%d-%s", i, pod.ObjectMeta.Name),
			},
			Spec: certsv1alpha1.PodCertificateRequestSpec{
				PodName:            pod.ObjectMeta.Name,
				PodUID:             pod.ObjectMeta.UID,
				ServiceAccountName: pod.Spec.ServiceAccountName,
				NodeName:           types.NodeName(pod.Spec.NodeName),
			},
		}
		pcrs = append(pcrs, pcr)
	}

	return pod, pvs, pcrs
}
@ -430,6 +430,12 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding)
			eventsRule(),
		},
	})
	addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "podcertificaterequestcleaner"},
		Rules: []rbacv1.PolicyRule{
			rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(certificatesGroup).Resources("podcertificaterequests").RuleOrDie(),
		},
	})
	addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "pvc-protection-controller"},
		Rules: []rbacv1.PolicyRule{
@ -271,6 +271,11 @@ func NodeRules() []rbacv1.PolicyRule {
	if utilfeature.DefaultFeatureGate.Enabled(features.KubeletServiceAccountTokenForCredentialProviders) {
		nodePolicyRules = append(nodePolicyRules, rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie())
	}
	// Kubelet needs to create arbitrary PodCertificateRequests to implement
	// podCertificate volumes.
	if utilfeature.DefaultFeatureGate.Enabled(features.PodCertificateRequest) {
		nodePolicyRules = append(nodePolicyRules, rbacv1helpers.NewRule("get", "list", "watch", "create").Groups(certificatesGroup).Resources("podcertificaterequests").RuleOrDie())
	}

	return nodePolicyRules
}
@ -1231,6 +1231,15 @@ items:
      - serviceaccounts
      verbs:
      - get
    - apiGroups:
      - certificates.k8s.io
      resources:
      - podcertificaterequests
      verbs:
      - create
      - get
      - list
      - watch
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:

@ -320,6 +320,22 @@ items:
  - kind: ServiceAccount
    name: pod-garbage-collector
    namespace: kube-system
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRoleBinding
  metadata:
    annotations:
      rbac.authorization.kubernetes.io/autoupdate: "true"
    labels:
      kubernetes.io/bootstrapping: rbac-defaults
    name: system:controller:podcertificaterequestcleaner
  roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: ClusterRole
    name: system:controller:podcertificaterequestcleaner
  subjects:
  - kind: ServiceAccount
    name: podcertificaterequestcleaner
    namespace: kube-system
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRoleBinding
  metadata:

@ -1037,6 +1037,24 @@ items:
    - pods/status
    verbs:
    - patch
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
    annotations:
      rbac.authorization.kubernetes.io/autoupdate: "true"
    labels:
      kubernetes.io/bootstrapping: rbac-defaults
    name: system:controller:podcertificaterequestcleaner
  rules:
  - apiGroups:
    - certificates.k8s.io
    resources:
    - podcertificaterequests
    verbs:
    - delete
    - get
    - list
    - watch
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
@ -53,6 +53,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&ClusterTrustBundle{},
		&ClusterTrustBundleList{},
		&PodCertificateRequest{},
		&PodCertificateRequestList{},
	)

	// Add the watch version that applies
@ -18,6 +18,7 @@ package v1alpha1
|
|||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
|
|
@ -106,3 +107,233 @@ type ClusterTrustBundleList struct {
|
|||
// items is a collection of ClusterTrustBundle objects
|
||||
Items []ClusterTrustBundle `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.32
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// PodCertificateRequest encodes a pod requesting a certificate from a given
|
||||
// signer.
|
||||
//
|
||||
// Kubelets use this API to implement podCertificate projected volumes
|
||||
type PodCertificateRequest struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// metadata contains the object metadata.
|
||||
//
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
	// spec contains the details about the certificate being requested.
	Spec PodCertificateRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`

	// status contains the issued certificate, and a standard set of conditions.
	// +optional
	Status PodCertificateRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}

// PodCertificateRequestSpec describes the certificate request. All fields are
// immutable after creation.
type PodCertificateRequestSpec struct {
	// signerName indicates the requested signer.
	//
	// All signer names beginning with `kubernetes.io` are reserved for use by
	// the Kubernetes project. There is currently one well-known signer
	// documented by the Kubernetes project,
	// `kubernetes.io/kube-apiserver-client-pod`, which will issue client
	// certificates understood by kube-apiserver. It is currently
	// unimplemented.
	//
	// +required
	SignerName string `json:"signerName" protobuf:"bytes,1,opt,name=signerName"`

	// podName is the name of the pod into which the certificate will be mounted.
	//
	// +required
	PodName string `json:"podName" protobuf:"bytes,2,opt,name=podName"`
	// podUID is the UID of the pod into which the certificate will be mounted.
	//
	// +required
	PodUID types.UID `json:"podUID" protobuf:"bytes,3,opt,name=podUID"`

	// serviceAccountName is the name of the service account the pod is running as.
	//
	// +required
	ServiceAccountName string `json:"serviceAccountName" protobuf:"bytes,4,opt,name=serviceAccountName"`
	// serviceAccountUID is the UID of the service account the pod is running as.
	//
	// +required
	ServiceAccountUID types.UID `json:"serviceAccountUID" protobuf:"bytes,5,opt,name=serviceAccountUID"`

	// nodeName is the name of the node the pod is assigned to.
	//
	// +required
	NodeName types.NodeName `json:"nodeName" protobuf:"bytes,6,opt,name=nodeName"`
	// nodeUID is the UID of the node the pod is assigned to.
	//
	// +required
	NodeUID types.UID `json:"nodeUID" protobuf:"bytes,7,opt,name=nodeUID"`

	// maxExpirationSeconds is the maximum lifetime permitted for the
	// certificate.
	//
	// If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
	// will reject values shorter than 3600 (1 hour). The maximum allowable
	// value is 7862400 (91 days).
	//
	// The signer implementation is then free to issue a certificate with any
	// lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
	// seconds (1 hour). This constraint is enforced by kube-apiserver.
	// `kubernetes.io` signers will never issue certificates with a lifetime
	// longer than 24 hours.
	//
	// +optional
	// +default=86400
	MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty" protobuf:"varint,8,opt,name=maxExpirationSeconds"`

	// pkixPublicKey is the PKIX-serialized public key the signer will issue the
	// certificate to.
	//
	// The key must be one of RSA3072, RSA4096, ECDSAP256, ECDSAP384, ECDSAP521,
	// or ED25519. Note that this list may be expanded in the future.
	//
	// Signer implementations do not need to support all key types supported by
	// kube-apiserver and kubelet. If a signer does not support the key type
	// used for a given PodCertificateRequest, it must deny the request by
	// setting a status.conditions entry with a type of "Denied" and a reason of
	// "UnsupportedKeyType". It may also suggest a key type that it does support
	// in the message field.
	//
	// +required
	PKIXPublicKey []byte `json:"pkixPublicKey" protobuf:"bytes,9,opt,name=pkixPublicKey"`

	// proofOfPossession proves that the requesting kubelet holds the private
	// key corresponding to pkixPublicKey.
	//
	// It is constructed by signing the ASCII bytes of the pod's UID using the
	// private key corresponding to `pkixPublicKey`.
	//
	// kube-apiserver validates the proof of possession during creation of the
	// PodCertificateRequest.
	//
	// If the key is an RSA key, then the signature is over the ASCII bytes of
	// the pod UID, using RSASSA-PSS from RFC 8017 (as implemented by the golang
	// function crypto/rsa.SignPSS with nil options).
	//
	// If the key is an ECDSA key, then the signature is as described by [SEC 1,
	// Version 2.0](https://www.secg.org/sec1-v2.pdf) (as implemented by the
	// golang library function crypto/ecdsa.SignASN1).
	//
	// If the key is an ED25519 key, the signature is as described by the
	// [ED25519 Specification](https://ed25519.cr.yp.to/) (as implemented by
	// the golang library crypto/ed25519.Sign).
	//
	// +required
	ProofOfPossession []byte `json:"proofOfPossession" protobuf:"bytes,10,opt,name=proofOfPossession"`
}
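
// The following sketch (illustrative only, not part of this API) shows how a
// client could produce spec.pkixPublicKey and spec.proofOfPossession for an
// ED25519 key; podUID stands for the ASCII form of the pod's UID:
//
//	pub, priv, err := ed25519.GenerateKey(rand.Reader)   // crypto/ed25519, crypto/rand
//	if err != nil { /* handle error */ }
//	pkixPublicKey, err := x509.MarshalPKIXPublicKey(pub) // crypto/x509; goes in spec.pkixPublicKey
//	if err != nil { /* handle error */ }
//	proof := ed25519.Sign(priv, []byte(podUID))          // goes in spec.proofOfPossession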

// PodCertificateRequestStatus describes the status of the request, and holds
// the certificate data if the request is issued.
type PodCertificateRequestStatus struct {
	// conditions applied to the request.
	//
	// The types "Issued", "Denied", and "Failed" have special handling. At
	// most one of these conditions may be present, and they must have status
	// "True".
	//
	// If the request is denied with `Reason=UnsupportedKeyType`, the signer may
	// suggest a key type that will work in the message field.
	//
	// +patchMergeKey=type
	// +patchStrategy=merge
	// +listType=map
	// +listMapKey=type
	// +optional
	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`

	// certificateChain is populated with an issued certificate by the signer.
	// This field is set via the /status subresource. Once populated, this field
	// is immutable.
	//
	// If the certificate signing request is denied, a condition of type
	// "Denied" is added and this field remains empty. If the signer cannot
	// issue the certificate, a condition of type "Failed" is added and this
	// field remains empty.
	//
	// Validation requirements:
	//  1. certificateChain must consist of one or more PEM-formatted certificates.
	//  2. Each entry must be a valid PEM-wrapped, DER-encoded ASN.1 Certificate as
	//     described in section 4 of RFC5280.
	//
	// If more than one block is present, and the definition of the requested
	// spec.signerName does not indicate otherwise, the first block is the
	// issued certificate, and subsequent blocks should be treated as
	// intermediate certificates and presented in TLS handshakes. When
	// projecting the chain into a pod volume, kubelet will drop any data
	// in-between the PEM blocks, as well as any PEM block headers.
	//
	// +optional
	CertificateChain string `json:"certificateChain,omitempty" protobuf:"bytes,2,opt,name=certificateChain"`

	// notBefore is the time at which the certificate becomes valid. The value
	// must be the same as the notBefore value in the leaf certificate in
	// certificateChain. This field is set via the /status subresource. Once
	// populated, it is immutable. The signer must set this field at the same
	// time it sets certificateChain.
	//
	// +optional
	NotBefore *metav1.Time `json:"notBefore,omitempty" protobuf:"bytes,4,opt,name=notBefore"`

	// beginRefreshAt is the time at which the kubelet should begin trying to
	// refresh the certificate. This field is set via the /status subresource,
	// and must be set at the same time as certificateChain. Once populated,
	// this field is immutable.
	//
	// This field is only a hint. Kubelet may start refreshing before or after
	// this time if necessary.
	//
	// +optional
	BeginRefreshAt *metav1.Time `json:"beginRefreshAt,omitempty" protobuf:"bytes,5,opt,name=beginRefreshAt"`

	// notAfter is the time at which the certificate expires. The value must be
	// the same as the notAfter value in the leaf certificate in
	// certificateChain. This field is set via the /status subresource. Once
	// populated, it is immutable. The signer must set this field at the same
	// time it sets certificateChain.
	//
	// +optional
	NotAfter *metav1.Time `json:"notAfter,omitempty" protobuf:"bytes,6,opt,name=notAfter"`
}

// Well-known condition types for PodCertificateRequests
const (
	// Denied indicates the request was denied by the signer.
	PodCertificateRequestConditionTypeDenied string = "Denied"
	// Failed indicates the signer failed to issue the certificate.
	PodCertificateRequestConditionTypeFailed string = "Failed"
	// Issued indicates the certificate has been issued.
	PodCertificateRequestConditionTypeIssued string = "Issued"
)

// Well-known condition reasons for PodCertificateRequests
const (
	// UnsupportedKeyType should be set on "Denied" conditions when the signer
	// doesn't support the key type of pkixPublicKey.
	PodCertificateRequestConditionUnsupportedKeyType string = "UnsupportedKeyType"
)
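
// As an illustrative sketch (not part of this commit), a signer that does not
// support the requested key type could deny a request using the constants
// above, via the condition helper in k8s.io/apimachinery/pkg/api/meta:
//
//	apimeta.SetStatusCondition(&pcr.Status.Conditions, metav1.Condition{
//		Type:    PodCertificateRequestConditionTypeDenied,
//		Status:  metav1.ConditionTrue,
//		Reason:  PodCertificateRequestConditionUnsupportedKeyType,
//		Message: "only ED25519 keys are supported by this signer",
//	})
//	// ...followed by an update through the PodCertificateRequest's /status subresource.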

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.32

// PodCertificateRequestList is a collection of PodCertificateRequest objects
type PodCertificateRequestList struct {
	metav1.TypeMeta `json:",inline"`

	// metadata contains the list metadata.
	//
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// items is a collection of PodCertificateRequest objects
	Items []PodCertificateRequest `json:"items" protobuf:"bytes,2,rep,name=items"`
}

@ -1989,6 +1989,79 @@ type ClusterTrustBundleProjection struct {
	Path string `json:"path" protobuf:"bytes,4,rep,name=path"`
}

// PodCertificateProjection provides a private key and X.509 certificate in the
// pod filesystem.
type PodCertificateProjection struct {
	// Kubelet's generated CSRs will be addressed to this signer.
	//
	// +required
	SignerName string `json:"signerName,omitempty" protobuf:"bytes,1,rep,name=signerName"`

	// The type of keypair Kubelet will generate for the pod.
	//
	// Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384",
	// "ECDSAP521", and "ED25519".
	//
	// +required
	KeyType string `json:"keyType,omitempty" protobuf:"bytes,2,rep,name=keyType"`

	// maxExpirationSeconds is the maximum lifetime permitted for the
	// certificate.
	//
	// Kubelet copies this value verbatim into the PodCertificateRequests it
	// generates for this projection.
	//
	// If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver
	// will reject values shorter than 3600 (1 hour). The maximum allowable
	// value is 7862400 (91 days).
	//
	// The signer implementation is then free to issue a certificate with any
	// lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600
	// seconds (1 hour). This constraint is enforced by kube-apiserver.
	// `kubernetes.io` signers will never issue certificates with a lifetime
	// longer than 24 hours.
	//
	// +optional
	MaxExpirationSeconds *int32 `json:"maxExpirationSeconds,omitempty" protobuf:"varint,3,opt,name=maxExpirationSeconds"`

	// Write the credential bundle at this path in the projected volume.
	//
	// The credential bundle is a single file that contains multiple PEM blocks.
	// The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private
	// key.
	//
	// The remaining blocks are CERTIFICATE blocks, containing the issued
	// certificate chain from the signer (leaf and any intermediates).
	//
	// Using credentialBundlePath lets your Pod's application code make a single
	// atomic read that retrieves a consistent key and certificate chain. If you
	// project them to separate files, your application code will need to
	// additionally check that the leaf certificate was issued to the key.
	//
	// +optional
	CredentialBundlePath string `json:"credentialBundlePath,omitempty" protobuf:"bytes,4,rep,name=credentialBundlePath"`

	// Write the key at this path in the projected volume.
	//
	// Most applications should use credentialBundlePath. When using keyPath
	// and certificateChainPath, your application needs to check that the key
	// and leaf certificate are consistent, because it is possible to read the
	// files mid-rotation.
	//
	// +optional
	KeyPath string `json:"keyPath,omitempty" protobuf:"bytes,5,rep,name=keyPath"`

	// Write the certificate chain at this path in the projected volume.
	//
	// Most applications should use credentialBundlePath. When using keyPath
	// and certificateChainPath, your application needs to check that the key
	// and leaf certificate are consistent, because it is possible to read the
	// files mid-rotation.
	//
	// +optional
	CertificateChainPath string `json:"certificateChainPath,omitempty" protobuf:"bytes,6,rep,name=certificateChainPath"`
}
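
// As an illustrative sketch (not part of this commit), application code could
// split a credential bundle into key and certificate chain with the standard
// library; /run/foo-cert/creds.pem is a hypothetical projected path:
//
//	raw, err := os.ReadFile("/run/foo-cert/creds.pem") // single atomic read
//	if err != nil { /* handle error */ }
//	var keyPEM, chainPEM []byte
//	for {
//		var block *pem.Block
//		block, raw = pem.Decode(raw)
//		if block == nil {
//			break
//		}
//		switch block.Type {
//		case "PRIVATE KEY":
//			keyPEM = pem.EncodeToMemory(block)
//		case "CERTIFICATE":
//			chainPEM = append(chainPEM, pem.EncodeToMemory(block)...)
//		}
//	}
//	cert, err := tls.X509KeyPair(chainPEM, keyPEM) // pairs the leaf certificate with the key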

// Represents a projected volume source
type ProjectedVolumeSource struct {
	// sources is the list of volume projections. Each entry in this list

@ -2039,6 +2112,44 @@ type VolumeProjection struct {
	// +featureGate=ClusterTrustBundleProjection
	// +optional
	ClusterTrustBundle *ClusterTrustBundleProjection `json:"clusterTrustBundle,omitempty" protobuf:"bytes,5,opt,name=clusterTrustBundle"`

	// Projects an auto-rotating credential bundle (private key and certificate
	// chain) that the pod can use either as a TLS client or server.
	//
	// Kubelet generates a private key and uses it to send a
	// PodCertificateRequest to the named signer. Once the signer approves the
	// request and issues a certificate chain, Kubelet writes the key and
	// certificate chain to the pod filesystem. The pod does not start until
	// certificates have been issued for each podCertificate projected volume
	// source in its spec.
	//
	// Kubelet will begin trying to rotate the certificate at the time indicated
	// by the signer using the PodCertificateRequest.Status.BeginRefreshAt
	// timestamp.
	//
	// Kubelet can write a single file, indicated by the credentialBundlePath
	// field, or separate files, indicated by the keyPath and
	// certificateChainPath fields.
	//
	// The credential bundle is a single file in PEM format. The first PEM
	// entry is the private key (in PKCS#8 format), and the remaining PEM
	// entries are the certificate chain issued by the signer (typically,
	// signers will return their certificate chain in leaf-to-root order).
	//
	// Prefer using the credential bundle format, since your application code
	// can read it atomically. If you use keyPath and certificateChainPath,
	// your application must make two separate file reads. If these coincide
	// with a certificate rotation, it is possible that the private key and leaf
	// certificate you read may not correspond to each other. Your application
	// will need to check for this condition, and re-read until they are
	// consistent.
	//
	// The named signer chooses the format of the certificate it issues;
	// consult the signer implementation's documentation to learn how to
	// use the certificates it issues.
	//
	// +featureGate=PodCertificateProjection
	// +optional
	PodCertificate *PodCertificateProjection `json:"podCertificate,omitempty" protobuf:"bytes,6,opt,name=podCertificate"`
}
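
// To illustrate the consistency check described above (a sketch, not part of
// this commit): an application using the hypothetical split paths tls.key and
// tls.crt can let crypto/tls verify that the leaf certificate matches the
// private key, and re-read on mismatch:
//
//	for {
//		keyPEM, _ := os.ReadFile("/run/foo-cert/tls.key")
//		chainPEM, _ := os.ReadFile("/run/foo-cert/tls.crt")
//		if cert, err := tls.X509KeyPair(chainPEM, keyPEM); err == nil {
//			return cert, nil // key and leaf certificate are consistent
//		}
//		time.Sleep(time.Second) // reads may have straddled a rotation
//	}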

const (

@ -27,6 +27,7 @@ import (
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/authentication/user"
	genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
	"k8s.io/apiserver/pkg/registry/rest"
	"k8s.io/apiserver/pkg/registry/rest/resttest"

@ -55,6 +56,10 @@ func (t *Tester) ClusterScope() *Tester {
	return t
}

func (t *Tester) SetUserInfo(userInfo user.Info) {
	t.tester.SetUserInfo(userInfo)
}

func (t *Tester) Namer(namer func(int) string) *Tester {
	t.tester = t.tester.Namer(namer)
	return t

@ -37,6 +37,7 @@ import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apiserver/pkg/authentication/user"
	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
	"k8s.io/apiserver/pkg/registry/rest"
)

@ -52,6 +53,7 @@ type Tester struct {
	generatesName       bool
	returnDeletedObject bool
	namer               func(int) string
	userInfo            user.Info
}

func New(t *testing.T, storage rest.Storage) *Tester {

@ -102,10 +104,21 @@ func (t *Tester) TestNamespace() string {
	return "test"
}

// SetUserInfo sets the UserInfo that should be present in the context when the
// storage operation is called.
func (t *Tester) SetUserInfo(userInfo user.Info) {
	t.userInfo = userInfo
}

// TestContext returns a namespaced context that will be used when making storage calls.
// Namespace is determined by TestNamespace()
func (t *Tester) TestContext() context.Context {
	return genericapirequest.WithNamespace(genericapirequest.NewContext(), t.TestNamespace())
	ctx := genericapirequest.NewContext()
	ctx = genericapirequest.WithNamespace(ctx, t.TestNamespace())
	if t.userInfo != nil {
		ctx = genericapirequest.WithUser(ctx, t.userInfo)
	}
	return ctx
}

func (t *Tester) getObjectMetaOrFail(obj runtime.Object) metav1.Object {

@ -531,6 +531,21 @@ Usage:
```


### podcertificatesigner

Runs a controller that signs PodCertificateRequests addressed to the signer name specified in the `--signer-name` flag. It generates a CA hierarchy in memory at startup.

```console
kubectl run test-agnhost \
  --generator=run-pod/v1 \
  --image=registry.k8s.io/e2e-test-images/agnhost:2.40 \
  --restart=Always \
  -- \
  podcertificatesigner \
  --signer-name=agnhost.k8s.io/testsigner
```


### port-forward-tester

Listens for TCP connections on a given address and port, optionally checks the data received,

@ -41,6 +41,7 @@ import (
	nosnatproxy "k8s.io/kubernetes/test/images/agnhost/no-snat-test-proxy"
	"k8s.io/kubernetes/test/images/agnhost/openidmetadata"
	"k8s.io/kubernetes/test/images/agnhost/pause"
	"k8s.io/kubernetes/test/images/agnhost/podcertificatesigner"
	portforwardtester "k8s.io/kubernetes/test/images/agnhost/port-forward-tester"
	"k8s.io/kubernetes/test/images/agnhost/porter"
	resconsumerctrl "k8s.io/kubernetes/test/images/agnhost/resource-consumer-controller"

@ -88,6 +89,7 @@ func main() {
	rootCmd.AddCommand(openidmetadata.CmdTestServiceAccountIssuerDiscovery)
	rootCmd.AddCommand(grpchealthchecking.CmdGrpcHealthChecking)
	rootCmd.AddCommand(vishhstress.CmdStress)
	rootCmd.AddCommand(podcertificatesigner.CmdPodCertificateSigner)

	// NOTE(claudiub): Some tests are passing logging related flags, so we need to be able to
	// accept them. This will also include them in the printed help.

@ -0,0 +1,89 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package podcertificatesigner is an agnhost subcommand implementing a toy
// PodCertificateRequest signer. It is meant to run continuously in an
// in-cluster pod.
package podcertificatesigner

import (
	"context"
	"flag"
	"fmt"
	"os"
	"os/signal"
	"syscall"

	"github.com/spf13/cobra"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/component-base/logs"
	"k8s.io/kubernetes/test/utils/hermeticpodcertificatesigner"
	"k8s.io/utils/clock"
)

var CmdPodCertificateSigner = &cobra.Command{
	Use:   "podcertificatesigner",
	Short: "Sign PodCertificateRequests addressed to a given signer",
	Args:  cobra.MaximumNArgs(0),
	RunE:  run,
}

var kubeconfigPath string
var signerName string

func init() {
	CmdPodCertificateSigner.Flags().StringVar(&kubeconfigPath, "kubeconfig", "", "Path to kubeconfig file to use for connection. If omitted, in-cluster config will be used.")
	CmdPodCertificateSigner.Flags().StringVar(&signerName, "signer-name", "", "The signer name to sign certificates for")
}

func run(cmd *cobra.Command, args []string) error {
	flag.Set("logtostderr", "true")

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	logs.InitLogs()
	defer logs.FlushLogs()

	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
	if err != nil {
		return fmt.Errorf("while building client config: %w", err)
	}

	kc, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return fmt.Errorf("while creating kubernetes client: %w", err)
	}

	caKeys, caCerts, err := hermeticpodcertificatesigner.GenerateCAHierarchy(1)
	if err != nil {
		return fmt.Errorf("while generating CA hierarchy: %w", err)
	}

	c := hermeticpodcertificatesigner.New(clock.RealClock{}, signerName, caKeys, caCerts, kc)
	go c.Run(ctx)

	// Wait for a shutdown signal.
	signalCh := make(chan os.Signal, 1)
	signal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM)
	<-signalCh

	// Canceling the context will begin exiting all of our controllers.
	cancel()

	return nil
}

@ -224,6 +224,13 @@ func GetEtcdStorageDataForNamespaceServedAt(namespace string, v string, isEmulat
			IntroducedVersion: "1.26",
			RemovedVersion:    "1.37",
		},
		gvr("certificates.k8s.io", "v1alpha1", "podcertificaterequests"): {
			Stub:              `{"metadata": {"name": "req-1"}, "spec": {"signerName":"example.com/signer", "podName":"pod-1", "podUID":"pod-uid-1", "serviceAccountName":"sa-1", "serviceAccountUID":"sa-uid-1", "nodeName":"node-1", "nodeUID":"node-uid-1", "maxExpirationSeconds":86400, "pkixPublicKey":"MCowBQYDK2VwAyEA5g+rk9q/hjojtc2nwHJ660RdX5w1f4AK0/kP391QyLY=", "proofOfPossession":"SuGHX7SMyPHuN5cD5wjKLXGNbhdlCYUnTH65JkTx17iWlLynQ/g9GiTYObftSHNzqRh0ofdgAGqK6a379O7RBw=="}}`,
			ExpectedEtcdPath:  "/registry/podcertificaterequests/" + namespace + "/req-1",
			ExpectedGVK:       gvkP("certificates.k8s.io", "v1alpha1", "PodCertificateRequest"),
			IntroducedVersion: "1.34",
			RemovedVersion:    "1.37",
		},
		// --

		// k8s.io/kubernetes/pkg/apis/certificates/v1beta1

@ -0,0 +1,410 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"context"
	"fmt"
	"slices"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	corelistersv1 "k8s.io/client-go/listers/core/v1"
	restclient "k8s.io/client-go/rest"
	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
	"k8s.io/kubernetes/pkg/kubelet/podcertificate"
	"k8s.io/kubernetes/test/integration/framework"
	"k8s.io/kubernetes/test/utils/hermeticpodcertificatesigner"
	"k8s.io/kubernetes/test/utils/ktesting"
	"k8s.io/utils/clock"
	"k8s.io/utils/ptr"
)

func TestPodCertificateManager(t *testing.T) {
	ctx, cancel := context.WithCancel(ktesting.Init(t))
	defer cancel()

	// Run an apiserver with PodCertificateRequest features enabled.
	s := kubeapiservertesting.StartTestServerOrDie(
		t,
		kubeapiservertesting.NewDefaultTestServerOptions(),
		[]string{
			"--authorization-mode=Node,RBAC",
			"--feature-gates=AuthorizeNodeWithSelectors=true,PodCertificateRequest=true",
			fmt.Sprintf("--runtime-config=%s=true", certsv1alpha1.SchemeGroupVersion),
		},
		framework.SharedEtcd(),
	)
	defer s.TearDownFn()

	adminClient := kubernetes.NewForConfigOrDie(s.ClientConfig)

	var err error

	//
	// Configure and boot up a fake podcertificaterequest signing controller.
	//

	signerName := "foo.com/signer"

	signerSA, err := adminClient.CoreV1().ServiceAccounts("kube-system").Create(ctx, &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "kube-system",
			Name:      "foo-pcr-signing-controller",
		},
	}, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating signer service account: %v", err)
	}

	signerClusterRole, err := adminClient.RbacV1().ClusterRoles().Create(ctx, &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name: "foo-com-pcr-signer",
		},
		Rules: []rbacv1.PolicyRule{
			{
				APIGroups:     []string{"certificates.k8s.io"},
				Resources:     []string{"signers"},
				Verbs:         []string{"sign"},
				ResourceNames: []string{"foo.com/*"},
			},
			{
				APIGroups: []string{"certificates.k8s.io"},
				Resources: []string{"podcertificaterequests"},
				Verbs:     []string{"get", "list", "watch"},
			},
			{
				APIGroups: []string{"certificates.k8s.io"},
				Resources: []string{"podcertificaterequests/status"},
				Verbs:     []string{"update"},
			},
		},
	}, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating signer ClusterRole: %v", err)
	}

	_, err = adminClient.RbacV1().ClusterRoleBindings().Create(ctx, &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: "system:serviceaccount:kube-system:foo-pcr-signer",
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     signerClusterRole.Name,
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      "ServiceAccount",
				Namespace: signerSA.ObjectMeta.Namespace,
				Name:      signerSA.ObjectMeta.Name,
			},
		},
	}, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating signer ClusterRoleBinding: %v", err)
	}

	signerClient := mustServiceAccountClient(t, s.ClientConfig, signerSA.ObjectMeta.Namespace, signerSA.ObjectMeta.Name)
	caKeys, caCerts, err := hermeticpodcertificatesigner.GenerateCAHierarchy(1)
	if err != nil {
		t.Fatalf("Unexpected error generating CA hierarchy: %v", err)
	}
	pcrSigner := hermeticpodcertificatesigner.New(clock.RealClock{}, signerName, caKeys, caCerts, signerClient)
	go pcrSigner.Run(ctx)

	//
	// Configure and boot up enough Kubelet subsystems to run
	// podcertificate.IssuingManager.
	//
	node1, err := adminClient.CoreV1().Nodes().Create(ctx, &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "node1",
		},
	}, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating node1: %v", err)
	}

	node1Client := mustNodeClient(t, s.ClientConfig, node1.ObjectMeta.Name)
	node1PodInformerFactory := informers.NewSharedInformerFactoryWithOptions(node1Client, 0, informers.WithTweakListOptions(func(options *metav1.ListOptions) {
		options.FieldSelector = "spec.nodeName=" + node1.ObjectMeta.Name
	}))
	node1PCRInformerFactory := informers.NewSharedInformerFactoryWithOptions(node1Client, 0, informers.WithTweakListOptions(func(options *metav1.ListOptions) {
		options.FieldSelector = "spec.nodeName=" + node1.ObjectMeta.Name
	}))
	node1NodeInformerFactory := informers.NewSharedInformerFactoryWithOptions(node1Client, 0, informers.WithTweakListOptions(func(options *metav1.ListOptions) {
		options.FieldSelector = "metadata.name=" + node1.ObjectMeta.Name
	}))

	node1PodManager := &FakePodManager{
		podLister: node1PodInformerFactory.Core().V1().Pods().Lister(),
	}

	node1PodCertificateManager := podcertificate.NewIssuingManager(
		node1Client,
		node1PodManager,
		node1PCRInformerFactory.Certificates().V1alpha1().PodCertificateRequests(),
		node1NodeInformerFactory.Core().V1().Nodes(),
		types.NodeName(node1.ObjectMeta.Name),
		clock.RealClock{},
	)

	node1PodInformerFactory.Start(ctx.Done())
	node1PCRInformerFactory.Start(ctx.Done())
	node1NodeInformerFactory.Start(ctx.Done())
	go node1PodCertificateManager.Run(ctx)

	//
	// Make a pod that uses a podcertificate volume.
	//

	workloadNS, err := adminClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: "workload-ns",
		},
	}, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating workload namespace: %v", err)
	}

	workloadSA, err := adminClient.CoreV1().ServiceAccounts(workloadNS.ObjectMeta.Name).Create(ctx, &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: workloadNS.ObjectMeta.Name,
			Name:      "workload",
		},
	}, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating workload serviceaccount: %v", err)
	}

	workloadPod, err := adminClient.CoreV1().Pods(workloadNS.ObjectMeta.Name).Create(ctx, &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: workloadNS.ObjectMeta.Name,
			Name:      "workload",
		},
		Spec: corev1.PodSpec{
			ServiceAccountName: workloadSA.ObjectMeta.Name,
			NodeName:           node1.ObjectMeta.Name,
			Containers: []corev1.Container{
				{
					Name:  "main",
					Image: "notarealimage",
					VolumeMounts: []corev1.VolumeMount{
						{
							Name:      "certificate",
							MountPath: "/run/foo-cert",
						},
					},
				},
			},
			Volumes: []corev1.Volume{
				{
					Name: "certificate",
					VolumeSource: corev1.VolumeSource{
						Projected: &corev1.ProjectedVolumeSource{
							Sources: []corev1.VolumeProjection{
								{
									PodCertificate: &corev1.PodCertificateProjection{
										SignerName:           signerName,
										KeyType:              "ED25519",
										CredentialBundlePath: "creds.pem",
									},
								},
							},
						},
					},
				},
			},
		},
	}, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating workload pod: %v", err)
	}

	// Because our fake podManager is based on an informer, we need to poll
	// until workloadPod is reflected in the informer.
	err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 15*time.Second, true, func(ctx context.Context) (bool, error) {
		_, ok := node1PodManager.GetPodByUID(workloadPod.ObjectMeta.UID)
		return ok, nil
	})
	if err != nil {
		t.Fatalf("Error while waiting for node1 podManager to know about workloadPod: %v", err)
	}

	node1PodCertificateManager.TrackPod(ctx, workloadPod)

	// Within a few seconds, we should see a PodCertificateRequest created for
	// this pod.
	var gotPCR *certsv1alpha1.PodCertificateRequest
	err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 15*time.Second, true, func(ctx context.Context) (bool, error) {
		pcrs, err := adminClient.CertificatesV1alpha1().PodCertificateRequests(workloadNS.ObjectMeta.Name).List(ctx, metav1.ListOptions{})
		if err != nil {
			return false, fmt.Errorf("while listing PodCertificateRequests: %w", err)
		}

		if len(pcrs.Items) == 0 {
			return false, nil
		}

		gotPCR = &pcrs.Items[0]
		return true, nil
	})
	if err != nil {
		t.Fatalf("Error while waiting for PCR to be created: %v", err)
	}

	// Check that the created PCR spec matches expectations. Blank out fields on
	// gotPCR that we don't care about. Blank out status, because the
	// controller might have already signed it.
	wantPCR := &certsv1alpha1.PodCertificateRequest{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: workloadNS.ObjectMeta.Name,
		},
		Spec: certsv1alpha1.PodCertificateRequestSpec{
			SignerName:           workloadPod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].PodCertificate.SignerName,
			PodName:              workloadPod.ObjectMeta.Name,
			PodUID:               workloadPod.ObjectMeta.UID,
			ServiceAccountName:   workloadSA.ObjectMeta.Name,
			ServiceAccountUID:    workloadSA.ObjectMeta.UID,
			NodeName:             types.NodeName(node1.ObjectMeta.Name),
			NodeUID:              node1.ObjectMeta.UID,
			MaxExpirationSeconds: ptr.To[int32](86400),
		},
	}
	gotPCRClone := gotPCR.DeepCopy()
	gotPCRClone.ObjectMeta = metav1.ObjectMeta{}
	gotPCRClone.ObjectMeta.Namespace = gotPCR.ObjectMeta.Namespace
	gotPCRClone.Spec.PKIXPublicKey = nil
	gotPCRClone.Spec.ProofOfPossession = nil
	gotPCRClone.Status = certsv1alpha1.PodCertificateRequestStatus{}
	if diff := cmp.Diff(gotPCRClone, wantPCR); diff != "" {
		t.Fatalf("PodCertificateManager created a bad PCR; diff (-got +want)\n%s", diff)
	}

	// Wait some more time for the PCR to be issued.
	err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 15*time.Second, true, func(ctx context.Context) (bool, error) {
		pcrs, err := adminClient.CertificatesV1alpha1().PodCertificateRequests(workloadNS.ObjectMeta.Name).List(ctx, metav1.ListOptions{})
		if err != nil {
			return false, fmt.Errorf("while listing PodCertificateRequests: %w", err)
		}

		if len(pcrs.Items) == 0 {
			return false, nil
		}

		gotPCR = &pcrs.Items[0]

		for _, cond := range gotPCR.Status.Conditions {
			switch cond.Type {
			case certsv1alpha1.PodCertificateRequestConditionTypeDenied,
				certsv1alpha1.PodCertificateRequestConditionTypeFailed,
				certsv1alpha1.PodCertificateRequestConditionTypeIssued:
				return true, nil
			}
		}
		return false, nil
	})
	if err != nil {
		t.Fatalf("Error while waiting for PCR to be issued: %v", err)
	}

	isIssued := slices.ContainsFunc(gotPCR.Status.Conditions, func(cond metav1.Condition) bool {
		return cond.Type == certsv1alpha1.PodCertificateRequestConditionTypeIssued
	})
	if !isIssued {
		t.Fatalf("The test signingController didn't issue the PCR:\n%+v", gotPCR)
	}

	// Now we know that the PCR was issued, so we can wait for the
	// podcertificate manager to return some valid credentials.
	err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 15*time.Second, true, func(ctx context.Context) (bool, error) {
		_, _, err := node1PodCertificateManager.GetPodCertificateCredentialBundle(ctx, workloadPod.ObjectMeta.Namespace, workloadPod.ObjectMeta.Name, string(workloadPod.ObjectMeta.UID), "certificate", 0)
		if err != nil {
			return false, err
		}
		return true, nil
	})
	if err != nil {
		t.Fatalf("Error while waiting for podcertificate manager to return valid credentials: %v", err)
	}

	_, certChain, err := node1PodCertificateManager.GetPodCertificateCredentialBundle(ctx, workloadPod.ObjectMeta.Namespace, workloadPod.ObjectMeta.Name, string(workloadPod.ObjectMeta.UID), "certificate", 0)
	if err != nil {
		t.Fatalf("Unexpected error getting credentials from pod certificate manager: %v", err)
	}

	if diff := cmp.Diff(string(certChain), gotPCR.Status.CertificateChain); diff != "" {
		t.Fatalf("PodCertificate manager returned bad cert chain; diff (-got +want)\n%s", diff)
	}
}

type FakePodManager struct {
	podLister corelistersv1.PodLister
}

func (f *FakePodManager) GetPods() []*corev1.Pod {
	ret, _ := f.podLister.List(labels.Everything())
	return ret
}

func (f *FakePodManager) GetPodByUID(uid types.UID) (*corev1.Pod, bool) {
	list, err := f.podLister.List(labels.Everything())
	if err != nil {
		return nil, false
	}

	for _, pod := range list {
		if pod.ObjectMeta.UID == uid {
			return pod, true
		}
	}

	return nil, false
}

func mustServiceAccountClient(t *testing.T, cfg *restclient.Config, ns, sa string) *kubernetes.Clientset {
	newCfg := restclient.CopyConfig(cfg)
	newCfg.Impersonate.UserName = fmt.Sprintf("system:serviceaccount:%s:%s", ns, sa)
	newCfg.Impersonate.Groups = []string{"system:authenticated", "system:serviceaccounts"}
	kc, err := kubernetes.NewForConfig(newCfg)
	if err != nil {
		t.Fatalf("Unexpected error creating kubernetes client impersonating %q", newCfg.Impersonate.UserName)
	}
	return kc
}

func mustNodeClient(t *testing.T, cfg *restclient.Config, node string) *kubernetes.Clientset {
	newCfg := restclient.CopyConfig(cfg)
	newCfg.Impersonate.UserName = fmt.Sprintf("system:node:%s", node)
	newCfg.Impersonate.Groups = []string{"system:authenticated", "system:nodes"}
	kc, err := kubernetes.NewForConfig(newCfg)
	if err != nil {
		t.Fatalf("Unexpected error creating kubernetes client impersonating %q", newCfg.Impersonate.UserName)
	}
	return kc
}
@ -0,0 +1,815 @@
|
|||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package podcertificaterequests
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"context"
|
||||
"crypto/ed25519"
|
||||
"crypto/rand"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/klog/v2/ktesting"
|
||||
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
|
||||
"k8s.io/kubernetes/pkg/controller/certificates/cleaner"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
"k8s.io/utils/clock"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
framework.EtcdMain(m.Run)
|
||||
}
|
||||
|
||||
func TestCleanerController(t *testing.T) {
|
||||
_, ctx := ktesting.NewTestContext(t)
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
// Run an apiserver with PodCertificateRequest features enabled.
|
||||
s := kubeapiservertesting.StartTestServerOrDie(
|
||||
t,
|
||||
kubeapiservertesting.NewDefaultTestServerOptions(),
|
||||
[]string{
|
||||
"--authorization-mode=Node,RBAC",
|
||||
"--feature-gates=AuthorizeNodeWithSelectors=true,PodCertificateRequest=true",
|
||||
fmt.Sprintf("--runtime-config=%s=true", certsv1alpha1.SchemeGroupVersion),
|
||||
},
|
||||
framework.SharedEtcd(),
|
||||
)
|
||||
defer s.TearDownFn()
|
||||
|
||||
client := clientset.NewForConfigOrDie(s.ClientConfig)
|
||||
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(s.ClientConfig, "certificatesigningrequest-informers")), time.Second)
|
||||
|
||||
// Register the cleaner controller with a short configured timeout. Within
|
||||
// 15 seconds the PCR should be deleted.
|
||||
cleanerClient, err := serviceAccountClient(s.ClientConfig, "kube-system", "podcertificaterequestcleaner")
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating client that impersonates kube-system/podcertificaterequestcleaner: %v", err)
|
||||
}
|
||||
c := cleaner.NewPCRCleanerController(
|
||||
cleanerClient,
|
||||
informers.Certificates().V1alpha1().PodCertificateRequests(),
|
||||
clock.RealClock{},
|
||||
1*time.Second,
|
||||
1*time.Second,
|
||||
)
|
||||
go c.Run(ctx, 1)
|
||||
|
||||
// Start the controller & informers
|
||||
informers.Start(ctx.Done())
|
||||
|
||||
// Make a node
|
||||
node := &corev1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node1",
|
||||
},
|
||||
Spec: corev1.NodeSpec{},
|
||||
}
|
||||
node, err = client.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating node1: %v", err)
|
||||
}
|
||||
|
||||
// Make a serviceaccount
|
||||
sa := &corev1.ServiceAccount{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "sa1",
|
||||
},
|
||||
}
|
||||
sa, err = client.CoreV1().ServiceAccounts("default").Create(ctx, sa, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating sa1: %v", err)
|
||||
}
|
||||
|
||||
// Make a pod
|
||||
pod := &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pod1",
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
ServiceAccountName: sa.ObjectMeta.Name,
|
||||
NodeName: node.ObjectMeta.Name,
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "main",
|
||||
Image: "notarealimage",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
pod, err = client.CoreV1().Pods("default").Create(ctx, pod, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating pod1: %v", err)
|
||||
}
|
||||
|
||||
// Create a clientset that impersonates node1
|
||||
node1Client, err := nodeClient(s.ClientConfig, "node1")
|
||||
if err != nil {
|
||||
t.Fatalf("Error in create clientset: %v", err)
|
||||
}
|
||||
|
||||
// Have node1 create a PodCertificateRequest for pod1
|
||||
_, _, pubPKIX, proof := mustMakeEd25519KeyAndProof(t, []byte(pod.ObjectMeta.UID))
|
||||
pcr := &certsv1alpha1.PodCertificateRequest{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pcr1",
|
||||
},
|
||||
Spec: certsv1alpha1.PodCertificateRequestSpec{
|
||||
SignerName: "kubernetes.io/foo",
|
||||
PodName: pod.ObjectMeta.Name,
|
||||
PodUID: pod.ObjectMeta.UID,
|
||||
ServiceAccountName: sa.ObjectMeta.Name,
|
||||
ServiceAccountUID: sa.ObjectMeta.UID,
|
||||
NodeName: types.NodeName(node.ObjectMeta.Name),
|
||||
NodeUID: node.ObjectMeta.UID,
|
||||
PKIXPublicKey: pubPKIX,
|
||||
ProofOfPossession: proof,
|
||||
},
|
||||
}
|
||||
pcr, err = node1Client.CertificatesV1alpha1().PodCertificateRequests(pcr.ObjectMeta.Namespace).Create(ctx, pcr, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating PodCertificateRequest: %v", err)
|
||||
}
|
||||
err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 15*time.Second, true, func(ctx context.Context) (bool, error) {
|
||||
_, err := client.CertificatesV1alpha1().PodCertificateRequests(pcr.ObjectMeta.Namespace).Get(ctx, pcr.ObjectMeta.Name, metav1.GetOptions{})
|
||||
if k8serrors.IsNotFound(err) {
|
||||
return true, nil
|
||||
} else if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error after waiting for PodCertificateRequest to be deleted: %v", err)
|
||||
}
|
||||
|
||||
// TODO(KEP-4317): For beta, check via audit logs that it was the cleaner
|
||||
// controller that issued the deletion.
|
||||
}
|
||||
|
||||
func TestNodeRestriction(t *testing.T) {
|
||||
// Create a setup with two nodes, and a pod running on node1. Node2 cannot
|
||||
// make a PodCertificateRequest that refers to the pod on node1.
|
||||
_, ctx := ktesting.NewTestContext(t)
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
// Run an apiserver with PodCertificateRequest features enabled.
|
||||
s := kubeapiservertesting.StartTestServerOrDie(
|
||||
t,
|
||||
kubeapiservertesting.NewDefaultTestServerOptions(),
|
||||
[]string{
|
||||
"--authorization-mode=Node,RBAC",
|
||||
"--enable-admission-plugins=NodeRestriction",
|
||||
"--feature-gates=AuthorizeNodeWithSelectors=true,PodCertificateRequest=true",
|
||||
fmt.Sprintf("--runtime-config=%s=true", certsv1alpha1.SchemeGroupVersion),
|
||||
},
|
||||
framework.SharedEtcd(),
|
||||
)
|
||||
defer s.TearDownFn()
|
||||
|
||||
client := clientset.NewForConfigOrDie(s.ClientConfig)
|
||||
|
||||
// Make node1 and node2
|
||||
node1 := &corev1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node1",
|
||||
},
|
||||
Spec: corev1.NodeSpec{},
|
||||
}
|
||||
node1, err := client.CoreV1().Nodes().Create(ctx, node1, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating node1: %v", err)
|
||||
}
|
||||
node2 := &corev1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node2",
|
||||
},
|
||||
Spec: corev1.NodeSpec{},
|
||||
}
|
||||
node2, err = client.CoreV1().Nodes().Create(ctx, node2, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating node1: %v", err)
|
||||
}
|
||||
|
||||
// Create clientsets for nodes
|
||||
node1Client, err := nodeClient(s.ClientConfig, "node1")
|
||||
if err != nil {
|
||||
t.Fatalf("Error in create clientset: %v", err)
|
||||
}
|
||||
node2Client, err := nodeClient(s.ClientConfig, "node2")
|
||||
if err != nil {
|
||||
t.Fatalf("Error in create clientset: %v", err)
|
||||
}
|
||||
|
||||
// Make a serviceaccount
|
||||
sa := &corev1.ServiceAccount{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "sa1",
|
||||
},
|
||||
}
|
||||
sa, err = client.CoreV1().ServiceAccounts("default").Create(ctx, sa, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating sa1: %v", err)
|
||||
}
|
||||
|
||||
// Make a pod
|
||||
pod := &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pod1",
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
ServiceAccountName: sa.ObjectMeta.Name,
|
||||
NodeName: node1.ObjectMeta.Name,
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "main",
|
||||
Image: "notarealimage",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
pod, err = client.CoreV1().Pods("default").Create(ctx, pod, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating pod1: %v", err)
|
||||
}
|
||||
|
||||
t.Run("node1 can create PCR for pod on node1", func(t *testing.T) {
|
||||
// Have node2 create a PodCertificateRequest for pod1
|
||||
_, _, pubPKIX, proof := mustMakeEd25519KeyAndProof(t, []byte(pod.ObjectMeta.UID))
|
||||
pcr := &certsv1alpha1.PodCertificateRequest{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pcr1",
|
||||
},
|
||||
Spec: certsv1alpha1.PodCertificateRequestSpec{
|
||||
SignerName: "kubernetes.io/foo",
|
||||
PodName: pod.ObjectMeta.Name,
|
||||
PodUID: pod.ObjectMeta.UID,
|
||||
ServiceAccountName: sa.ObjectMeta.Name,
|
||||
ServiceAccountUID: sa.ObjectMeta.UID,
|
||||
NodeName: types.NodeName(node1.ObjectMeta.Name),
|
||||
NodeUID: node1.ObjectMeta.UID,
|
||||
PKIXPublicKey: pubPKIX,
|
||||
ProofOfPossession: proof,
|
||||
},
|
||||
}
|
||||
|
||||
// Informer lag inside kube-apiserver could cause us to get transient
|
||||
// errors.
|
||||
err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 15*time.Second, true, func(ctx context.Context) (bool, error) {
|
||||
_, err = node1Client.CertificatesV1alpha1().PodCertificateRequests("default").Create(ctx, pcr, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("PCR creation unexpectedly failed: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("node2 cannot create PCR for pod on node1", func(t *testing.T) {
|
||||
// Have node2 create a PodCertificateRequest for pod1
|
||||
_, _, pubPKIX, proof := mustMakeEd25519KeyAndProof(t, []byte(pod.ObjectMeta.UID))
|
||||
pcr := &certsv1alpha1.PodCertificateRequest{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pcr1",
|
||||
},
|
||||
Spec: certsv1alpha1.PodCertificateRequestSpec{
|
||||
SignerName: "kubernetes.io/foo",
|
||||
PodName: pod.ObjectMeta.Name,
|
||||
PodUID: pod.ObjectMeta.UID,
|
||||
ServiceAccountName: sa.ObjectMeta.Name,
|
||||
ServiceAccountUID: sa.ObjectMeta.UID,
|
||||
NodeName: types.NodeName(node1.ObjectMeta.Name),
|
||||
NodeUID: node1.ObjectMeta.UID,
|
||||
PKIXPublicKey: pubPKIX,
|
||||
ProofOfPossession: proof,
|
||||
},
|
||||
}
|
||||
|
||||
// Informer lag inside kube-apiserver could cause us to get a
|
||||
// non-Forbidden error from the noderestriction admission plugin. This
|
||||
// should be transient, so wait for some time to see if we reach our
|
||||
// durable error condition.
|
||||
err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 15*time.Second, true, func(ctx context.Context) (bool, error) {
|
||||
_, err = node2Client.CertificatesV1alpha1().PodCertificateRequests("default").Create(ctx, pcr, metav1.CreateOptions{})
|
||||
if err == nil || k8serrors.IsForbidden(err) {
|
||||
return true, err
|
||||
}
|
||||
return false, err
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatalf("PCR creation unexpectedly succeeded")
|
||||
} else if !k8serrors.IsForbidden(err) {
|
||||
t.Fatalf("PCR creation failed with unexpected error code (wanted Forbidden): %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("node2 cannot create PCR for pod that doesn't exist", func(t *testing.T) {
|
||||
// Have node2 create a PodCertificateRequest for pod1
|
||||
_, _, pubPKIX, proof := mustMakeEd25519KeyAndProof(t, []byte(pod.ObjectMeta.UID))
|
||||
pcr := &certsv1alpha1.PodCertificateRequest{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pcr1",
|
||||
},
|
||||
Spec: certsv1alpha1.PodCertificateRequestSpec{
|
||||
SignerName: "kubernetes.io/foo",
|
||||
PodName: "dnepod",
|
||||
PodUID: "dnepoduid",
|
||||
ServiceAccountName: sa.ObjectMeta.Name,
|
||||
ServiceAccountUID: sa.ObjectMeta.UID,
|
||||
NodeName: types.NodeName(node2.ObjectMeta.Name),
|
||||
NodeUID: node2.ObjectMeta.UID,
|
||||
PKIXPublicKey: pubPKIX,
|
||||
ProofOfPossession: proof,
|
||||
},
|
||||
}
|
||||
|
||||
// The noderestriction admission plugin will *not* return Forbidden,
|
||||
// since this situation could always be caused by informer lag. Just
|
||||
// hold here for 15 seconds and assume if we're still getting an error,
|
||||
// then it can't be due to informer lag.
|
||||
err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 15*time.Second, true, func(ctx context.Context) (bool, error) {
|
||||
_, err = node2Client.CertificatesV1alpha1().PodCertificateRequests("default").Create(ctx, pcr, metav1.CreateOptions{})
|
||||
if err == nil {
|
||||
return true, err
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err == nil { // EQUALS nil
|
||||
t.Fatalf("PCR creation unexpectedly succeeded")
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
func TestNodeAuthorization(t *testing.T) {
|
||||
_, ctx := ktesting.NewTestContext(t)
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
// Run an apiserver with PodCertificateRequest features enabled.
|
||||
s := kubeapiservertesting.StartTestServerOrDie(
|
||||
t,
|
||||
kubeapiservertesting.NewDefaultTestServerOptions(),
|
||||
[]string{
|
||||
"--authorization-mode=Node,RBAC",
|
||||
"--enable-admission-plugins=NodeRestriction",
|
||||
"--feature-gates=AuthorizeNodeWithSelectors=true,PodCertificateRequest=true",
|
||||
fmt.Sprintf("--runtime-config=%s=true", certsv1alpha1.SchemeGroupVersion),
|
||||
},
|
||||
framework.SharedEtcd(),
|
||||
)
|
||||
defer s.TearDownFn()
|
||||
|
||||
client := clientset.NewForConfigOrDie(s.ClientConfig)
|
||||
|
||||
// Make node1 and node2
|
||||
node1 := &corev1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node1",
|
||||
},
|
||||
Spec: corev1.NodeSpec{},
|
||||
}
|
||||
node1, err := client.CoreV1().Nodes().Create(ctx, node1, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating node1: %v", err)
|
||||
}
|
||||
node2 := &corev1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node2",
|
||||
},
|
||||
Spec: corev1.NodeSpec{},
|
||||
}
|
||||
_, err = client.CoreV1().Nodes().Create(ctx, node2, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating node1: %v", err)
|
||||
}
|
||||
|
||||
// Make a serviceaccount
|
||||
sa := &corev1.ServiceAccount{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "sa1",
|
||||
},
|
||||
}
|
||||
sa, err = client.CoreV1().ServiceAccounts("default").Create(ctx, sa, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating sa1: %v", err)
|
||||
}
|
||||
|
||||
// Make a pod
|
||||
pod := &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pod1",
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
ServiceAccountName: sa.ObjectMeta.Name,
|
||||
NodeName: node1.ObjectMeta.Name,
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "main",
|
||||
Image: "notarealimage",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
pod, err = client.CoreV1().Pods("default").Create(ctx, pod, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating pod1: %v", err)
|
||||
}
|
||||
|
||||
// Create a clientsets that impersonate the nodes
|
||||
node1Client, err := nodeClient(s.ClientConfig, "node1")
|
||||
if err != nil {
|
||||
t.Fatalf("Error in create clientset: %v", err)
|
||||
}
|
||||
node2Client, err := nodeClient(s.ClientConfig, "node2")
|
||||
if err != nil {
|
||||
t.Fatalf("Error in create clientset: %v", err)
|
||||
}
|
||||
|
||||
// Have node1 create a PodCertificateRequest for pod1
|
||||
_, _, pubPKIX, proof := mustMakeEd25519KeyAndProof(t, []byte(pod.ObjectMeta.UID))
|
||||
pcr := &certsv1alpha1.PodCertificateRequest{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "pcr1",
|
||||
},
|
||||
Spec: certsv1alpha1.PodCertificateRequestSpec{
|
||||
SignerName: "kubernetes.io/foo",
|
||||
PodName: pod.ObjectMeta.Name,
|
||||
PodUID: pod.ObjectMeta.UID,
|
||||
ServiceAccountName: sa.ObjectMeta.Name,
|
||||
ServiceAccountUID: sa.ObjectMeta.UID,
|
||||
NodeName: types.NodeName(node1.ObjectMeta.Name),
|
||||
NodeUID: node1.ObjectMeta.UID,
|
||||
PKIXPublicKey: pubPKIX,
|
||||
ProofOfPossession: proof,
|
||||
},
|
||||
}
|
||||
|
||||
// Creating the PCR could fail if there is informer lag in the
|
||||
// noderestriction logic. Poll until it succeeds.
|
||||
err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 15*time.Second, true, func(ctx context.Context) (bool, error) {
|
||||
_, err = node1Client.CertificatesV1alpha1().PodCertificateRequests("default").Create(ctx, pcr, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error creating PodCertificateRequest: %v", err)
|
||||
}
|
||||
|
||||
t.Run("node1 can directly get pcr1", func(t *testing.T) {
|
||||
_, err := node1Client.CertificatesV1alpha1().PodCertificateRequests("default").Get(ctx, "pcr1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error listing PodCertificateRequests as node1: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("node1 can see pcr1 when listing", func(t *testing.T) {
|
||||
pcrList, err := node1Client.CertificatesV1alpha1().PodCertificateRequests("default").List(ctx, metav1.ListOptions{
|
||||
FieldSelector: "spec.nodeName=node1",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error listing PodCertificateRequests as node1: %v", err)
|
||||
}
|
||||
|
||||
if len(pcrList.Items) != 1 {
|
||||
t.Fatalf("Unexpected list length returned when node1 lists PodCertificateRequests; got %d, want 1", len(pcrList.Items))
|
||||
}
|
||||
|
||||
if pcrList.Items[0].ObjectMeta.Name != "pcr1" {
|
||||
t.Fatalf("Unexpected list contents returned when node1 lists PodCertificateRequests; got %q want %q", pcrList.Items[0].ObjectMeta.Name, "pcr1")
|
||||
}
|
||||
})

	t.Run("node2 cannot list with field selector for node1", func(t *testing.T) {
		_, err := node2Client.CertificatesV1alpha1().PodCertificateRequests("default").List(ctx, metav1.ListOptions{
			FieldSelector: "spec.nodeName=node1",
		})
		if err == nil {
			t.Fatalf("Listing PodCertificateRequests unexpectedly succeeded")
		} else if !k8serrors.IsForbidden(err) {
			t.Fatalf("Listing PodCertificateRequests failed with unexpected error (want Forbidden): %v", err)
		}
	})

	t.Run("node2 cannot directly get pcr1", func(t *testing.T) {
		_, err := node2Client.CertificatesV1alpha1().PodCertificateRequests("default").Get(ctx, "pcr1", metav1.GetOptions{})
		if err == nil {
			t.Fatalf("Getting pcr1 unexpectedly succeeded")
		} else if !k8serrors.IsForbidden(err) {
			t.Fatalf("Getting pcr1 failed with unexpected error (want Forbidden): %v", err)
		}
	})

	t.Run("node2 cannot see pcr1 when listing", func(t *testing.T) {
		pcrList, err := node2Client.CertificatesV1alpha1().PodCertificateRequests("default").List(ctx, metav1.ListOptions{
			FieldSelector: "spec.nodeName=node2",
		})
		if err != nil {
			t.Fatalf("Unexpected error listing PodCertificateRequests as node2: %v", err)
		}

		if len(pcrList.Items) != 0 {
			t.Fatalf("Unexpected list length returned when node2 lists PodCertificateRequests; got %d, want 0", len(pcrList.Items))
		}
	})
}

func TestNodeAuthorizerNamespaceNameConfusion(t *testing.T) {
	// A targeted test case to make sure that the node authorizer isn't
	// mixing up namespaces and names.

	_, ctx := ktesting.NewTestContext(t)
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Run an apiserver with PodCertificateRequest features enabled.
	s := kubeapiservertesting.StartTestServerOrDie(
		t,
		kubeapiservertesting.NewDefaultTestServerOptions(),
		[]string{
			"--authorization-mode=Node,RBAC",
			"--enable-admission-plugins=NodeRestriction",
			"--feature-gates=AuthorizeNodeWithSelectors=true,PodCertificateRequest=true",
			fmt.Sprintf("--runtime-config=%s=true", certsv1alpha1.SchemeGroupVersion),
		},
		framework.SharedEtcd(),
	)
	defer s.TearDownFn()

	client := clientset.NewForConfigOrDie(s.ClientConfig)

	// Make node1 and node2
	node1 := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "node1",
		},
		Spec: corev1.NodeSpec{},
	}
	node1, err := client.CoreV1().Nodes().Create(ctx, node1, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating node1: %v", err)
	}
	node2 := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "node2",
		},
		Spec: corev1.NodeSpec{},
	}
	node2, err = client.CoreV1().Nodes().Create(ctx, node2, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating node2: %v", err)
	}

	// Make namespaces "foo" and "bar"
	_, err = client.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: "foo",
		},
	}, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating namespace foo: %v", err)
	}
	_, err = client.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: "bar",
		},
	}, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating namespace bar: %v", err)
	}

	// Make a serviceaccount in each namespace
	saFoo := &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "foo",
			Name:      "foo",
		},
	}
	saFoo, err = client.CoreV1().ServiceAccounts("foo").Create(ctx, saFoo, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating serviceaccount foo: %v", err)
	}
	saBar := &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "bar",
			Name:      "bar",
		},
	}
	saBar, err = client.CoreV1().ServiceAccounts("bar").Create(ctx, saBar, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating serviceaccount bar: %v", err)
	}

	// Make a pod named "foo" in namespace "bar", and vice-versa
	podBarFoo := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "bar",
			Name:      "foo",
		},
		Spec: corev1.PodSpec{
			ServiceAccountName: saBar.ObjectMeta.Name,
			NodeName:           node1.ObjectMeta.Name,
			Containers: []corev1.Container{
				{
					Name:  "main",
					Image: "notarealimage",
				},
			},
		},
	}
	podBarFoo, err = client.CoreV1().Pods("bar").Create(ctx, podBarFoo, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating pod bar/foo: %v", err)
	}
	podFooBar := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "foo",
			Name:      "bar",
		},
		Spec: corev1.PodSpec{
			ServiceAccountName: saFoo.ObjectMeta.Name,
			NodeName:           node2.ObjectMeta.Name,
			Containers: []corev1.Container{
				{
					Name:  "main",
					Image: "notarealimage",
				},
			},
		},
	}
	podFooBar, err = client.CoreV1().Pods("foo").Create(ctx, podFooBar, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error creating pod foo/bar: %v", err)
	}

	// Create clientsets that impersonate the nodes
	node1Client, err := nodeClient(s.ClientConfig, "node1")
	if err != nil {
		t.Fatalf("Error creating clientset: %v", err)
	}
	node2Client, err := nodeClient(s.ClientConfig, "node2")
	if err != nil {
		t.Fatalf("Error creating clientset: %v", err)
	}

	// Have node1 create a PodCertificateRequest for bar/foo
	_, _, pubPKIX, proof := mustMakeEd25519KeyAndProof(t, []byte(podBarFoo.ObjectMeta.UID))
	pcrBarFoo := &certsv1alpha1.PodCertificateRequest{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "bar",
			Name:      "foo",
		},
		Spec: certsv1alpha1.PodCertificateRequestSpec{
			SignerName:         "kubernetes.io/foo",
			PodName:            podBarFoo.ObjectMeta.Name,
			PodUID:             podBarFoo.ObjectMeta.UID,
			ServiceAccountName: saBar.ObjectMeta.Name,
			ServiceAccountUID:  saBar.ObjectMeta.UID,
			NodeName:           types.NodeName(node1.ObjectMeta.Name),
			NodeUID:            node1.ObjectMeta.UID,
			PKIXPublicKey:      pubPKIX,
			ProofOfPossession:  proof,
		},
	}
	// Creating the PCR could fail if there is informer lag in the
	// noderestriction logic. Poll until it succeeds.
	err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 15*time.Second, true, func(ctx context.Context) (bool, error) {
		_, err := node1Client.CertificatesV1alpha1().PodCertificateRequests("bar").Create(ctx, pcrBarFoo, metav1.CreateOptions{})
		if err != nil {
			// Returning a nil error keeps polling until the timeout.
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		t.Fatalf("Unexpected error creating pcr bar/foo: %v", err)
	}

	// Have node2 create a PodCertificateRequest for foo/bar
	_, _, pubPKIXFooBar, proofFooBar := mustMakeEd25519KeyAndProof(t, []byte(podFooBar.ObjectMeta.UID))
	pcrFooBar := &certsv1alpha1.PodCertificateRequest{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "foo",
			Name:      "bar",
		},
		Spec: certsv1alpha1.PodCertificateRequestSpec{
			SignerName:         "kubernetes.io/foo",
			PodName:            podFooBar.ObjectMeta.Name,
			PodUID:             podFooBar.ObjectMeta.UID,
			ServiceAccountName: saFoo.ObjectMeta.Name,
			ServiceAccountUID:  saFoo.ObjectMeta.UID,
			NodeName:           types.NodeName(node2.ObjectMeta.Name),
			NodeUID:            node2.ObjectMeta.UID,
			PKIXPublicKey:      pubPKIXFooBar,
			ProofOfPossession:  proofFooBar,
		},
	}
	// Creating the PCR could fail if there is informer lag in the
	// noderestriction logic. Poll until it succeeds.
	err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 15*time.Second, true, func(ctx context.Context) (bool, error) {
		_, err := node2Client.CertificatesV1alpha1().PodCertificateRequests("foo").Create(ctx, pcrFooBar, metav1.CreateOptions{})
		if err != nil {
			// Returning a nil error keeps polling until the timeout.
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		t.Fatalf("Unexpected error creating pcr foo/bar: %v", err)
	}

	t.Run("node1 can directly get bar/foo", func(t *testing.T) {
		_, err := node1Client.CertificatesV1alpha1().PodCertificateRequests("bar").Get(ctx, "foo", metav1.GetOptions{})
		if err != nil {
			t.Fatalf("Unexpected error getting bar/foo as node1: %v", err)
		}
	})

	t.Run("node2 can directly get foo/bar", func(t *testing.T) {
		_, err := node2Client.CertificatesV1alpha1().PodCertificateRequests("foo").Get(ctx, "bar", metav1.GetOptions{})
		if err != nil {
			t.Fatalf("Unexpected error getting foo/bar as node2: %v", err)
		}
	})

	// Delete bar/foo
	err = client.CertificatesV1alpha1().PodCertificateRequests("bar").Delete(ctx, "foo", metav1.DeleteOptions{})
	if err != nil {
		t.Fatalf("Unexpected error deleting pcr bar/foo: %v", err)
	}

	t.Run("node2 can still directly get foo/bar after bar/foo was deleted", func(t *testing.T) {
		_, err := node2Client.CertificatesV1alpha1().PodCertificateRequests("foo").Get(ctx, "bar", metav1.GetOptions{})
		if err != nil {
			t.Fatalf("Unexpected error getting foo/bar as node2: %v", err)
		}
	})
}
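
// serviceAccountClient returns a clientset that impersonates the given
// service account via the client impersonation config.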
func serviceAccountClient(cfg *restclient.Config, ns, sa string) (*clientset.Clientset, error) {
	newCfg := restclient.CopyConfig(cfg)
	newCfg.Impersonate.UserName = fmt.Sprintf("system:serviceaccount:%s:%s", ns, sa)
	newCfg.Impersonate.Groups = []string{"system:authenticated", "system:serviceaccounts"}
	return clientset.NewForConfig(newCfg)
}
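
// nodeClient returns a clientset that impersonates the given node
// (system:node:<name>, in the system:nodes group).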
func nodeClient(cfg *restclient.Config, node string) (*clientset.Clientset, error) {
	newCfg := restclient.CopyConfig(cfg)
	newCfg.Impersonate.UserName = fmt.Sprintf("system:node:%s", node)
	newCfg.Impersonate.Groups = []string{"system:authenticated", "system:nodes"}
	return clientset.NewForConfig(newCfg)
}
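
// mustMakeEd25519KeyAndProof generates an ed25519 keypair and returns the
// private key, the public key, the PKIX marshaling of the public key, and a
// signature over toBeSigned (the pod UID in these tests), which the callers
// use as the PodCertificateRequest proof-of-possession.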
func mustMakeEd25519KeyAndProof(t *testing.T, toBeSigned []byte) (ed25519.PrivateKey, ed25519.PublicKey, []byte, []byte) {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		t.Fatalf("Error while generating ed25519 key: %v", err)
	}
	pubPKIX, err := x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		t.Fatalf("Error while marshaling PKIX public key: %v", err)
	}
	sig := ed25519.Sign(priv, toBeSigned)
	return priv, pub, pubPKIX, sig
}
@@ -0,0 +1,320 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package hermeticpodcertificatesigner

import (
	"bytes"
	"context"
	"crypto"
	"crypto/ed25519"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"net/url"
	"path"
	"time"

	certsv1alpha1 "k8s.io/api/certificates/v1alpha1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/util/wait"
	certinformersv1alpha1 "k8s.io/client-go/informers/certificates/v1alpha1"
	"k8s.io/client-go/kubernetes"
	certlistersv1alpha1 "k8s.io/client-go/listers/certificates/v1alpha1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog/v2"
	"k8s.io/utils/clock"
	"k8s.io/utils/ptr"
)

// Controller is an in-memory signing controller for PodCertificateRequests.
type Controller struct {
	clock clock.PassiveClock

	signerName string

	kc          kubernetes.Interface
	pcrInformer cache.SharedIndexInformer
	pcrQueue    workqueue.TypedRateLimitingInterface[string]

	caKeys  []crypto.PrivateKey
	caCerts [][]byte
}
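
// caKeys and caCerts are parallel slices ordered root-first; the last entry
// is the CA that actually signs issued certificates (see handlePCR).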

// New creates a new Controller.
func New(clock clock.PassiveClock, signerName string, caKeys []crypto.PrivateKey, caCerts [][]byte, kc kubernetes.Interface) *Controller {
	pcrInformer := certinformersv1alpha1.NewFilteredPodCertificateRequestInformer(kc, metav1.NamespaceAll, 24*time.Hour, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		func(opts *metav1.ListOptions) {
			opts.FieldSelector = fields.OneTermEqualSelector("spec.signerName", signerName).String()
		},
	)

	sc := &Controller{
		clock:       clock,
		signerName:  signerName,
		kc:          kc,
		pcrInformer: pcrInformer,
		pcrQueue:    workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[string]()),
		caKeys:      caKeys,
		caCerts:     caCerts,
	}

	sc.pcrInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(new any) {
			key, err := cache.MetaNamespaceKeyFunc(new)
			if err != nil {
				return
			}
			sc.pcrQueue.Add(key)
		},
		UpdateFunc: func(old, new any) {
			key, err := cache.MetaNamespaceKeyFunc(new)
			if err != nil {
				return
			}
			sc.pcrQueue.Add(key)
		},
		DeleteFunc: func(old any) {
			key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(old)
			if err != nil {
				return
			}
			sc.pcrQueue.Add(key)
		},
	})

	return sc
}
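
// Run starts the informer and one worker goroutine, and blocks until ctx is
// canceled.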
func (c *Controller) Run(ctx context.Context) {
	defer c.pcrQueue.ShutDown()
	go c.pcrInformer.Run(ctx.Done())
	if !cache.WaitForCacheSync(ctx.Done(), c.pcrInformer.HasSynced) {
		return
	}

	go wait.UntilWithContext(ctx, c.runWorker, time.Second)
	<-ctx.Done()
}

func (c *Controller) runWorker(ctx context.Context) {
	for c.processNextWorkItem(ctx) {
	}
}

func (c *Controller) processNextWorkItem(ctx context.Context) bool {
	key, quit := c.pcrQueue.Get()
	if quit {
		return false
	}
	defer c.pcrQueue.Done(key)

	klog.InfoS("Processing PCR", "key", key)

	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		klog.ErrorS(err, "Error while splitting key into namespace and name", "key", key)
		return true
	}

	pcr, err := certlistersv1alpha1.NewPodCertificateRequestLister(c.pcrInformer.GetIndexer()).PodCertificateRequests(namespace).Get(name)
	if k8serrors.IsNotFound(err) {
		c.pcrQueue.Forget(key)
		return true
	} else if err != nil {
		klog.ErrorS(err, "Error while retrieving PodCertificateRequest", "key", key)
		return true
	}

	err = c.handlePCR(ctx, pcr)
	if err != nil {
		klog.ErrorS(err, "Error while handling PodCertificateRequest", "key", key)
		c.pcrQueue.AddRateLimited(key)
		return true
	}

	c.pcrQueue.Forget(key)
	return true
}
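
// handlePCR issues a certificate for a single PodCertificateRequest, ignoring
// requests that are for a different signer or that already carry a
// certificate chain.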
func (c *Controller) handlePCR(ctx context.Context, pcr *certsv1alpha1.PodCertificateRequest) error {
	if pcr.Spec.SignerName != c.signerName {
		return nil
	}

	// PodCertificateRequests don't have an approval stage, and the node
	// restriction / isolation check is handled by kube-apiserver.

	// If our signer had a policy about which pods are allowed to request
	// certificates, it would be implemented here.

	// Proceed to signing. Our toy signer will make a SPIFFE cert encoding the
	// namespace and name of the pod's service account.

	// Is the PCR already signed?
	if pcr.Status.CertificateChain != "" {
		return nil
	}

	subjectPublicKey, err := x509.ParsePKIXPublicKey(pcr.Spec.PKIXPublicKey)
	if err != nil {
		return fmt.Errorf("while parsing subject public key: %w", err)
	}

	// If our signer had an opinion on which key types were allowable, it would
	// check subjectPublicKey, and deny the PCR with a SuggestedKeyType
	// condition on it.

	lifetime := 24 * time.Hour
	requestedLifetime := time.Duration(*pcr.Spec.MaxExpirationSeconds) * time.Second
	if requestedLifetime < lifetime {
		lifetime = requestedLifetime
	}
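
	// Note: MaxExpirationSeconds was dereferenced above without a nil check;
	// this toy signer assumes the API server has defaulted the field.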

	spiffeURI := &url.URL{
		Scheme: "spiffe",
		Host:   "cluster.local",
		Path:   path.Join("ns", pcr.ObjectMeta.Namespace, "sa", pcr.Spec.ServiceAccountName),
	}

	notBefore := c.clock.Now().Add(-2 * time.Minute)
	notAfter := notBefore.Add(lifetime)
	beginRefreshAt := notAfter.Add(-30 * time.Minute)
	template := &x509.Certificate{
		URIs:        []*url.URL{spiffeURI},
		NotBefore:   notBefore,
		NotAfter:    notAfter,
		KeyUsage:    x509.KeyUsageDigitalSignature,
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
	}

	signingCert, err := x509.ParseCertificate(c.caCerts[len(c.caCerts)-1])
	if err != nil {
		return fmt.Errorf("while parsing signing certificate: %w", err)
	}

	subjectCertDER, err := x509.CreateCertificate(rand.Reader, template, signingCert, subjectPublicKey, c.caKeys[len(c.caKeys)-1])
	if err != nil {
		return fmt.Errorf("while signing subject cert: %w", err)
	}

	// Compose the certificate chain
	chainPEM := &bytes.Buffer{}
	err = pem.Encode(chainPEM, &pem.Block{
		Type:  "CERTIFICATE",
		Bytes: subjectCertDER,
	})
	if err != nil {
		return fmt.Errorf("while encoding leaf certificate to PEM: %w", err)
	}
	for i := 0; i < len(c.caCerts)-1; i++ {
		err = pem.Encode(chainPEM, &pem.Block{
			Type:  "CERTIFICATE",
			Bytes: c.caCerts[len(c.caCerts)-1-i],
		})
		if err != nil {
			return fmt.Errorf("while encoding intermediate certificate to PEM: %w", err)
		}
	}
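
	// The chain built above is leaf-first, followed by the intermediates from
	// deepest to shallowest; the root (c.caCerts[0]) is omitted, since
	// verifiers are expected to hold it as a trust anchor.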

	// Don't modify the copy in the informer cache.
	pcr = pcr.DeepCopy()
	pcr.Status.Conditions = []metav1.Condition{
		{
			Type:               certsv1alpha1.PodCertificateRequestConditionTypeIssued,
			Status:             metav1.ConditionTrue,
			Reason:             "Reason",
			Message:            "Issued",
			LastTransitionTime: metav1.NewTime(c.clock.Now()),
		},
	}
	pcr.Status.CertificateChain = chainPEM.String()
	pcr.Status.NotBefore = ptr.To(metav1.NewTime(notBefore))
	pcr.Status.BeginRefreshAt = ptr.To(metav1.NewTime(beginRefreshAt))
	pcr.Status.NotAfter = ptr.To(metav1.NewTime(notAfter))

	_, err = c.kc.CertificatesV1alpha1().PodCertificateRequests(pcr.ObjectMeta.Namespace).UpdateStatus(ctx, pcr, metav1.UpdateOptions{})
	if err != nil {
		return fmt.Errorf("while updating PodCertificateRequest: %w", err)
	}

	return nil
}

// GenerateCAHierarchy makes a CA hierarchy, possibly with intermediates. The
// outputs can be used with Controller.
func GenerateCAHierarchy(numIntermediates int) ([]crypto.PrivateKey, [][]byte, error) {
	caKeys := []crypto.PrivateKey{}
	caCerts := [][]byte{}

	rootPubKey, rootPrivKey, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		return nil, nil, fmt.Errorf("while generating root key: %w", err)
	}

	notBefore := time.Now()
	notAfter := notBefore.Add(365 * 24 * time.Hour)

	rootTemplate := &x509.Certificate{
		NotBefore:             notBefore,
		NotAfter:              notAfter,
		IsCA:                  true,
		BasicConstraintsValid: true,
		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
	}

	rootDER, err := x509.CreateCertificate(rand.Reader, rootTemplate, rootTemplate, rootPubKey, rootPrivKey)
	if err != nil {
		return nil, nil, fmt.Errorf("while generating root certificate: %w", err)
	}

	caKeys = append(caKeys, rootPrivKey)
	caCerts = append(caCerts, rootDER)

	for i := 0; i < numIntermediates; i++ {
		pubKey, privKey, err := ed25519.GenerateKey(rand.Reader)
		if err != nil {
			return nil, nil, fmt.Errorf("while generating intermediate key: %w", err)
		}

		template := &x509.Certificate{
			NotBefore:             notBefore,
			NotAfter:              notAfter,
			IsCA:                  true,
			BasicConstraintsValid: true,
			KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		}

		signingCert, err := x509.ParseCertificate(caCerts[len(caCerts)-1])
		if err != nil {
			return nil, nil, fmt.Errorf("while parsing previous cert: %w", err)
		}

		intermediateDER, err := x509.CreateCertificate(rand.Reader, template, signingCert, pubKey, caKeys[len(caKeys)-1])
		if err != nil {
			return nil, nil, fmt.Errorf("while signing intermediate certificate: %w", err)
		}

		caKeys = append(caKeys, privKey)
		caCerts = append(caCerts, intermediateDER)
	}

	return caKeys, caCerts, nil
}
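
// An illustrative sketch (not part of this package) of how these pieces fit
// together; the kubernetes.Interface "kc", the context, and the signer name
// are assumptions of the example:
//
//	caKeys, caCerts, err := GenerateCAHierarchy(1)
//	if err != nil {
//		panic(err)
//	}
//	ctrl := New(clock.RealClock{}, "example.com/toy-signer", caKeys, caCerts, kc)
//	go ctrl.Run(ctx)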