Merge 098f2c6ec6 into 44ff6085cd
Commit 025e2b4854
6 changed files with 513 additions and 8 deletions
@@ -75,6 +75,10 @@ type RuntimeHelper interface {
	// PodCPUAndMemoryStats reads the latest CPU & memory usage stats.
	PodCPUAndMemoryStats(context.Context, *v1.Pod, *PodStatus) (*statsapi.PodStats, error)

	// OnPodSandboxReady is a callback invoked after the pod sandbox, networking, and volumes are ready.
	// It is used to update the PodReadyToStartContainers condition.
	OnPodSandboxReady(ctx context.Context, pod *v1.Pod) error
}

// ShouldContainerBeRestarted checks whether a container needs to be restarted.
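Because the RuntimeHelper interface above gains a new method, every implementation must now provide it. Purely as a sketch, with a hypothetical receiver type (the FakeRuntimeHelper hunk below takes the same approach in-tree), a no-op implementation would look like:

	// OnPodSandboxReady satisfies the extended RuntimeHelper interface.
	// myRuntimeHelper is a hypothetical implementation; a no-op keeps it compiling,
	// while the real Kubelet implementation updates the PodReadyToStartContainers condition.
	func (h *myRuntimeHelper) OnPodSandboxReady(ctx context.Context, pod *v1.Pod) error {
		return nil
	}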
@@ -128,3 +128,8 @@ func (f *FakeRuntimeHelper) PodCPUAndMemoryStats(_ context.Context, pod *v1.Pod,
	}
	return nil, fmt.Errorf("stats for pod %q not found", pod.UID)
}

func (f *FakeRuntimeHelper) OnPodSandboxReady(_ context.Context, _ *v1.Pod) error {
	// Not implemented
	return nil
}
@@ -50,6 +50,7 @@ import (
	v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
	utilfs "k8s.io/kubernetes/pkg/util/filesystem"
	utilpod "k8s.io/kubernetes/pkg/util/pod"
	netutils "k8s.io/utils/net"
	"k8s.io/utils/ptr"
@@ -3381,3 +3382,45 @@ func (kl *Kubelet) fastStaticPodsRegistration(ctx context.Context) {
func (kl *Kubelet) SetPodWatchCondition(podUID types.UID, conditionKey string, condition pleg.WatchCondition) {
	kl.pleg.SetPodWatchCondition(podUID, conditionKey, condition)
}

// OnPodSandboxReady is the callback implementation invoked by the container runtime after
// all three requirements (sandbox, networking, volumes) are ready, to immediately update
// the `PodReadyToStartContainers` pod status condition to `True`.
// This method implements the RuntimeHelper interface.
// Ref: https://github.com/kubernetes/kubernetes/issues/134460
func (kl *Kubelet) OnPodSandboxReady(ctx context.Context, pod *v1.Pod) error {
	if !utilfeature.DefaultFeatureGate.Enabled(features.PodReadyToStartContainersCondition) {
		return nil
	}

	logger := klog.FromContext(ctx)
	logger.V(3).Info("OnPodSandboxReady callback invoked", "pod", klog.KObj(pod), "podUID", pod.UID)

	existingStatus, ok := kl.statusManager.GetPodStatus(pod.UID)
	if !ok {
		existingStatus = pod.Status
	}

	readySandboxCondition := v1.PodCondition{
		Type:               v1.PodReadyToStartContainers,
		Status:             v1.ConditionTrue,
		ObservedGeneration: podutil.CalculatePodConditionObservedGeneration(&existingStatus, pod.Generation, v1.PodReadyToStartContainers),
	}

	lastTransitionTime := metav1.Now()
	_, existingCondition := podutil.GetPodCondition(&existingStatus, v1.PodReadyToStartContainers)
	if existingCondition != nil && existingCondition.Status == readySandboxCondition.Status {
		lastTransitionTime = existingCondition.LastTransitionTime
	}
	readySandboxCondition.LastTransitionTime = lastTransitionTime

	existingStatus.Conditions = utilpod.ReplaceOrAppendPodCondition(existingStatus.Conditions, &readySandboxCondition)

	kl.statusManager.SetPodStatus(logger, pod, existingStatus)

	logger.V(3).Info("Successfully updated PodReadyToStartContainers condition after sandbox creation",
		"pod", klog.KObj(pod),
		"podUID", pod.UID)

	return nil
}
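For illustration only (not part of this change): once the callback has run, the condition is visible in the pod status. A minimal sketch of checking it using nothing but the core v1 API types, with a hypothetical helper name:

	// podReadyToStartContainers reports whether the PodReadyToStartContainers
	// condition in the given pod status is currently True.
	func podReadyToStartContainers(status *v1.PodStatus) bool {
		for _, cond := range status.Conditions {
			if cond.Type == v1.PodReadyToStartContainers {
				return cond.Status == v1.ConditionTrue
			}
		}
		return false
	}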
@@ -1387,10 +1387,11 @@ func (m *kubeGenericRuntimeManager) computePodLevelResourcesResizeAction(ctx con
// 2. Kill pod sandbox if necessary.
// 3. Kill any containers that should not be running.
// 4. Create sandbox if necessary.
// 5. Create ephemeral containers.
// 6. Create init containers.
// 7. Resize running containers (if InPlacePodVerticalScaling==true)
// 8. Create normal containers.
// 5. Invoke OnPodSandboxReady to notify Kubelet to update pod status.
// 6. Create ephemeral containers.
// 7. Create init containers.
// 8. Resize running containers (if InPlacePodVerticalScaling==true)
// 9. Create normal containers.
func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff, restartAllContainers bool) (result kubecontainer.PodSyncResult) {
	logger := klog.FromContext(ctx)
	// Step 1: Compute sandbox and container changes.
@@ -1588,6 +1589,16 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
			podIPs = m.determinePodSandboxIPs(ctx, pod.Namespace, pod.Name, resp.GetStatus())
			logger.V(4).Info("Determined the ip for pod after sandbox changed", "IPs", podIPs, "pod", klog.KObj(pod))
		}

		// Step 5: invoke the sandbox ready callback before image pulling.
		// At this point, dynamic resources are prepared (by the PrepareDynamicResources() call above)
		// and volumes are already mounted (at the kubelet SyncPod() level), so
		// all requirements (sandbox, networking, volumes) are met to set the `PodReadyToStartContainers=True` condition.
		logger.V(4).Info("Pod sandbox and network ready, invoking callback", "pod", klog.KObj(pod), "podIPs", podIPs)
		if err := m.runtimeHelper.OnPodSandboxReady(ctx, pod); err != nil {
			// log the error but continue the pod creation process, to retain the existing behaviour
			logger.Error(err, "Failed to invoke sandbox ready callback, continuing with pod creation", "pod", klog.KObj(pod))
		}
	}

	// the start containers routines depend on pod ip (as in primary pod ip)
@@ -1669,7 +1680,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
		return nil
	}

	// Step 5: start ephemeral containers
	// Step 6: start ephemeral containers
	// These are started "prior" to init containers to allow running ephemeral containers even when there
	// are errors starting an init container. In practice init containers will start first since ephemeral
	// containers cannot be specified on pod creation.
@@ -1677,7 +1688,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
		start(ctx, "ephemeral container", metrics.EphemeralContainer, ephemeralContainerStartSpec(&pod.Spec.EphemeralContainers[idx]))
	}

	// Step 6: start init containers.
	// Step 7: start init containers.
	for _, idx := range podContainerChanges.InitContainersToStart {
		container := &pod.Spec.InitContainers[idx]
		// Start the next init container.
@@ -1694,14 +1705,14 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
		logger.V(4).Info("Completed init container for pod", "containerName", container.Name, "pod", klog.KObj(pod))
	}

	// Step 7: For containers in podContainerChanges.ContainersToUpdate[CPU,Memory] list, invoke UpdateContainerResources
	// Step 8: For containers in podContainerChanges.ContainersToUpdate[CPU,Memory] list, invoke UpdateContainerResources
	if resizable, _, _ := allocation.IsInPlacePodVerticalScalingAllowed(pod); resizable {
		if len(podContainerChanges.ContainersToUpdate) > 0 || podContainerChanges.UpdatePodResources || podContainerChanges.UpdatePodLevelResources {
			result.SyncResults = append(result.SyncResults, m.doPodResizeAction(ctx, pod, podStatus, podContainerChanges))
		}
	}

	// Step 8: start containers in podContainerChanges.ContainersToStart.
	// Step 9: start containers in podContainerChanges.ContainersToStart.
	for _, idx := range podContainerChanges.ContainersToStart {
		start(ctx, "container", metrics.Container, containerStartSpec(&pod.Spec.Containers[idx]))
	}
@@ -5121,3 +5121,287 @@ func TestCmpActuatedAllocated(t *testing.T) {
		})
	}
}

// testRuntimeHelper implements the RuntimeHelper interface for testing OnPodSandboxReady invocation.
type testRuntimeHelper struct {
	*containertest.FakeRuntimeHelper
	onPodSandboxReadyCalled       bool
	onPodSandboxReadyPod          *v1.Pod
	onPodSandboxReadyCtx          context.Context
	onPodSandboxReadyError        error
	captureStateFunc              func() // optional function to capture state when OnPodSandboxReady is called
	prepareDynamicResourcesCalled bool
	prepareDynamicResourcesError  error
}

func (t *testRuntimeHelper) OnPodSandboxReady(ctx context.Context, pod *v1.Pod) error {
	t.onPodSandboxReadyCalled = true
	t.onPodSandboxReadyPod = pod
	t.onPodSandboxReadyCtx = ctx
	if t.captureStateFunc != nil {
		t.captureStateFunc()
	}
	return t.onPodSandboxReadyError
}

func (t *testRuntimeHelper) PrepareDynamicResources(ctx context.Context, pod *v1.Pod) error {
	t.prepareDynamicResourcesCalled = true
	return t.prepareDynamicResourcesError
}

// TestOnPodSandboxReadyInvocation verifies that OnPodSandboxReady is called at the correct time
// and validates the order between the DRA allocate calls and the PodReadyToStartContainers condition.
// It works in the following order:
// 1. set up the test helper and inject errors
// 2. create the pod (with/without devices)
// 3. run SyncPod
// 4. verify device allocation
// 5. verify the OnPodSandboxReady invocation
// 6. verify the final pod state
func TestOnPodSandboxReadyInvocation(t *testing.T) {
	tCtx := ktesting.Init(t)

	tests := []struct {
		name                            string
		onPodSandboxReadyShouldErr      bool
		deviceAllocationShouldErr       bool
		expectOnPodSandboxReady         bool
		expectSyncPodSuccess            bool
		expectDeviceAllocation          bool
		enablePodReadyToStartContainers bool
		description                     string
	}{
		{
			name:                            "OnPodSandboxReady succeeds with feature enabled",
			onPodSandboxReadyShouldErr:      false,
			deviceAllocationShouldErr:       false,
			expectOnPodSandboxReady:         true,
			expectSyncPodSuccess:            true,
			expectDeviceAllocation:          false,
			enablePodReadyToStartContainers: true,
			description:                     "Verifies OnPodSandboxReady is called and succeeds with PodReadyToStartContainersCondition feature gate enabled",
		},
		{
			name:                            "OnPodSandboxReady succeeds with feature disabled",
			onPodSandboxReadyShouldErr:      false,
			deviceAllocationShouldErr:       false,
			expectOnPodSandboxReady:         true,
			expectSyncPodSuccess:            true,
			expectDeviceAllocation:          false,
			enablePodReadyToStartContainers: false,
			description:                     "Verifies OnPodSandboxReady is called and succeeds with PodReadyToStartContainersCondition feature gate disabled",
		},
		{
			name:                            "OnPodSandboxReady fails but SyncPod continues with feature enabled",
			onPodSandboxReadyShouldErr:      true,
			deviceAllocationShouldErr:       false,
			expectOnPodSandboxReady:         true,
			expectSyncPodSuccess:            true, // SyncPod still succeeds even if OnPodSandboxReady fails
			expectDeviceAllocation:          false,
			enablePodReadyToStartContainers: true,
			description:                     "Verifies OnPodSandboxReady errors don't block pod creation with PodReadyToStartContainersCondition feature gate enabled",
		},
		{
			name:                            "OnPodSandboxReady fails but SyncPod continues with feature disabled",
			onPodSandboxReadyShouldErr:      true,
			deviceAllocationShouldErr:       false,
			expectOnPodSandboxReady:         true,
			expectSyncPodSuccess:            true, // SyncPod still succeeds even if OnPodSandboxReady fails
			expectDeviceAllocation:          false,
			enablePodReadyToStartContainers: false,
			description:                     "Verifies OnPodSandboxReady errors don't block pod creation with PodReadyToStartContainersCondition feature gate disabled",
		},
		{
			name:                            "PrepareDynamicResources (device allocation) called before OnPodSandboxReady with feature enabled",
			onPodSandboxReadyShouldErr:      false,
			deviceAllocationShouldErr:       false,
			expectOnPodSandboxReady:         true,
			expectSyncPodSuccess:            true,
			expectDeviceAllocation:          true,
			enablePodReadyToStartContainers: true,
			description:                     "Verifies the order (PrepareDynamicResources -> OnPodSandboxReady) in case of pod with ResourceClaims with PodReadyToStartContainersCondition feature gate enabled",
		},
		{
			name:                            "PrepareDynamicResources (device allocation) called before OnPodSandboxReady with feature disabled",
			onPodSandboxReadyShouldErr:      false,
			deviceAllocationShouldErr:       false,
			expectOnPodSandboxReady:         true,
			expectSyncPodSuccess:            true,
			expectDeviceAllocation:          true,
			enablePodReadyToStartContainers: false,
			description:                     "Verifies the order (PrepareDynamicResources -> OnPodSandboxReady) in case of pod with ResourceClaims with PodReadyToStartContainersCondition feature gate disabled",
		},
		{
			name:                            "PrepareDynamicResources (device allocation) failure prevents sandbox creation with feature enabled",
			onPodSandboxReadyShouldErr:      false,
			deviceAllocationShouldErr:       true,
			expectOnPodSandboxReady:         false,
			expectSyncPodSuccess:            true, // SyncPod doesn't return an error; it just returns early if the `PrepareDynamicResources` call fails
			expectDeviceAllocation:          true,
			enablePodReadyToStartContainers: true,
			description:                     "Verifies PrepareDynamicResources failure causes early return in case of pod with ResourceClaims with PodReadyToStartContainersCondition feature gate enabled",
		},
		{
			name:                            "PrepareDynamicResources (device allocation) failure prevents sandbox creation with feature disabled",
			onPodSandboxReadyShouldErr:      false,
			deviceAllocationShouldErr:       true,
			expectOnPodSandboxReady:         false,
			expectSyncPodSuccess:            true, // SyncPod doesn't return an error; it just returns early if the `PrepareDynamicResources` call fails
			expectDeviceAllocation:          true,
			enablePodReadyToStartContainers: false,
			description:                     "Verifies PrepareDynamicResources failure causes early return in case of pod with ResourceClaims with PodReadyToStartContainersCondition feature gate disabled",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodReadyToStartContainersCondition, test.enablePodReadyToStartContainers)

			// step 1 - set up the test helper and inject errors
			fakeRuntime, fakeImage, m, err := createTestRuntimeManager(tCtx)
			require.NoError(t, err)

			testHelper := &testRuntimeHelper{
				FakeRuntimeHelper: &containertest.FakeRuntimeHelper{},
			}
			if test.onPodSandboxReadyShouldErr {
				testHelper.onPodSandboxReadyError = fmt.Errorf("OnPodSandboxReady intentionally failed for testing")
			}
			if test.deviceAllocationShouldErr {
				testHelper.prepareDynamicResourcesError = fmt.Errorf("PrepareDynamicResources intentionally failed for testing")
			}
			m.runtimeHelper = testHelper

			// step 2 - create the pod (with/without devices)
			pod := &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					UID:       "test-pod-uid",
					Name:      "test-pod",
					Namespace: "test-namespace",
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:            "test-container",
							Image:           "busybox",
							ImagePullPolicy: v1.PullIfNotPresent,
						},
					},
				},
			}
			if test.expectDeviceAllocation {
				pod.Spec.ResourceClaims = []v1.PodResourceClaim{
					{
						Name: "test-device",
					},
				}
			}

			// step 3 - run SyncPod
			backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
			result := m.SyncPod(tCtx, pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff, false)

			if test.expectSyncPodSuccess {
				require.NoError(t, result.Error(), test.description)
			} else {
				require.Error(t, result.Error(), test.description)
			}

			// step 4 - verify device allocation
			if test.expectDeviceAllocation {
				require.True(t, testHelper.prepareDynamicResourcesCalled,
					"PrepareDynamicResources should be called for pods with resource claims")

				if test.expectOnPodSandboxReady && testHelper.onPodSandboxReadyCalled {
					require.True(t, testHelper.prepareDynamicResourcesCalled,
						"PrepareDynamicResources must be called before OnPodSandboxReady")
				}
			}

			// step 5 - verify the OnPodSandboxReady invocation
			assert.Equal(t, test.expectOnPodSandboxReady, testHelper.onPodSandboxReadyCalled, "OnPodSandboxReady invocation mismatch: "+test.description)

			if test.expectOnPodSandboxReady {
				assert.NotNil(t, testHelper.onPodSandboxReadyPod, "OnPodSandboxReady should receive pod")
				assert.Equal(t, pod.UID, testHelper.onPodSandboxReadyPod.UID, "OnPodSandboxReady should receive correct pod UID")
				assert.Equal(t, pod.Name, testHelper.onPodSandboxReadyPod.Name, "OnPodSandboxReady should receive correct pod name")
				assert.Equal(t, pod.Namespace, testHelper.onPodSandboxReadyPod.Namespace, "OnPodSandboxReady should receive correct pod namespace")
				assert.NotNil(t, testHelper.onPodSandboxReadyCtx, "OnPodSandboxReady should receive context")

				require.Len(t, fakeRuntime.Sandboxes, 1, "sandbox should be created before OnPodSandboxReady")
				for _, sandbox := range fakeRuntime.Sandboxes {
					require.Equal(t, runtimeapi.PodSandboxState_SANDBOX_READY, sandbox.State, "sandbox should be ready when OnPodSandboxReady is invoked")
				}
			}

			// step 6 - verify the final pod state
			if test.expectSyncPodSuccess && !test.deviceAllocationShouldErr {
				assert.Len(t, fakeRuntime.Containers, 1, "container should be created")
				assert.Len(t, fakeImage.Images, 1, "image should be pulled")
				for _, c := range fakeRuntime.Containers {
					assert.Equal(t, runtimeapi.ContainerState_CONTAINER_RUNNING, c.State, "container should be running")
				}
			}

			if test.deviceAllocationShouldErr {
				require.Empty(t, fakeRuntime.Sandboxes, "sandbox should not be created when device allocation fails")
				require.Empty(t, fakeRuntime.Containers, "containers should not be created when device allocation fails")
			}
		})
	}
}

// TestOnPodSandboxReadyTiming tests that OnPodSandboxReady is invoked after sandbox
// creation and network setup but before image pulling.
func TestOnPodSandboxReadyTiming(t *testing.T) {
	tCtx := ktesting.Init(t)
	fakeRuntime, fakeImage, m, err := createTestRuntimeManager(tCtx)
	require.NoError(t, err)

	// track the state of the pod at the moment OnPodSandboxReady is invoked
	var sandboxCount int
	var containerCount int
	var imageCount int

	testHelper := &testRuntimeHelper{
		FakeRuntimeHelper: &containertest.FakeRuntimeHelper{},
		captureStateFunc: func() {
			sandboxCount = len(fakeRuntime.Sandboxes)
			containerCount = len(fakeRuntime.Containers)
			imageCount = len(fakeImage.Images)
		},
	}

	m.runtimeHelper = testHelper

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "timing-test-pod",
			Name:      "timing-test",
			Namespace: "default",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:            "test-container",
					Image:           "busybox",
					ImagePullPolicy: v1.PullIfNotPresent,
				},
			},
		},
	}

	backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
	result := m.SyncPod(tCtx, pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff, false)
	require.NoError(t, result.Error())

	// verify the order: OnPodSandboxReady should be invoked after sandbox creation but before containers are created
	assert.Equal(t, 1, sandboxCount, "sandbox should exist when OnPodSandboxReady is invoked")
	assert.Equal(t, 0, containerCount, "containers should not exist yet when OnPodSandboxReady is invoked")
	// Note that the image may or may not be pulled at OnPodSandboxReady time, depending on whether it already exists.
	t.Logf("At OnPodSandboxReady time: sandboxes=%d, containers=%d, images=%d", sandboxCount, containerCount, imageCount)

	// verify the final state of the pod
	assert.Len(t, fakeRuntime.Sandboxes, 1, "final sandbox count")
	assert.Len(t, fakeRuntime.Containers, 1, "final container count")
}
test/e2e_node/pod_conditions_criproxy_linux.go (new file, 158 lines)
@@ -0,0 +1,158 @@
//go:build linux

/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2enode

import (
	"context"
	"fmt"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/kubernetes/pkg/features"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	"k8s.io/kubernetes/test/e2e_node/criproxy"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"
)

// PodReadyToStartContainers condition timing tests with CRI Proxy delays (Linux only).
var _ = SIGDescribe("Pod conditions managed by Kubelet", func() {
	f := framework.NewDefaultFramework("pod-conditions")
	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline

	f.Context("including PodReadyToStartContainers condition", f.WithSerial(), framework.WithFeatureGate(features.PodReadyToStartContainersCondition), func() {
		tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
			if initialConfig.FeatureGates == nil {
				initialConfig.FeatureGates = map[string]bool{}
			}
		})

		f.Context("timing with CRI Proxy delays", feature.CriProxy, func() {
			ginkgo.BeforeEach(func() {
				if e2eCriProxy == nil {
					ginkgo.Skip("Skip the test since the CRI Proxy is undefined. Please run with --cri-proxy-enabled=true")
				}
				if err := resetCRIProxyInjector(e2eCriProxy); err != nil {
					ginkgo.Skip("Skip the test since the CRI Proxy is undefined.")
				}
				ginkgo.DeferCleanup(func() error {
					return resetCRIProxyInjector(e2eCriProxy)
				})
			})

			ginkgo.It("a pod without init containers should report PodReadyToStartContainers condition set before image pull completes", runPodReadyToStartContainersTimingTest(f, false))
			ginkgo.It("a pod with init containers should report PodReadyToStartContainers condition set before image pull completes", runPodReadyToStartContainersTimingTest(f, true))
		})

		addAfterEachForCleaningUpPods(f)
	})
})

// newPullImageAlwaysPodWithInitContainer creates a pod with an init container and ImagePullPolicy: Always.
func newPullImageAlwaysPodWithInitContainer() *v1.Pod {
	podName := "cri-proxy-test-" + string(uuid.NewUUID())
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
		},
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{
				{
					Image:           imageutils.GetE2EImage(imageutils.BusyBox),
					Name:            "init",
					ImagePullPolicy: v1.PullAlways,
					Command:         []string{"sh", "-c", "sleep 2"},
				},
			},
			Containers: []v1.Container{
				{
					Image:           imageutils.GetPauseImageName(),
					Name:            "main",
					ImagePullPolicy: v1.PullAlways,
				},
			},
		},
	}
	return pod
}

func runPodReadyToStartContainersTimingTest(f *framework.Framework, hasInitContainers bool) func(ctx context.Context) {
	return func(ctx context.Context) {
		const delayTime = 15 * time.Second

		ginkgo.By("Injecting delay into PullImage calls")
		err := addCRIProxyInjector(e2eCriProxy, func(apiName string) error {
			if apiName == criproxy.PullImage {
				ginkgo.By(fmt.Sprintf("Delaying PullImage by %v", delayTime))
				time.Sleep(delayTime)
			}
			return nil
		})
		framework.ExpectNoError(err)

		ginkgo.By("Creating test pod with ImagePullPolicy: Always")
		var testPod *v1.Pod
		if hasInitContainers {
			testPod = newPullImageAlwaysPodWithInitContainer()
		} else {
			testPod = newPullImageAlwaysPod()
		}
		testPod = e2epod.NewPodClient(f).Create(ctx, testPod)

		ginkgo.By("Waiting for PodReadyToStartContainers condition to be set")
		gomega.Eventually(func() error {
			pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, testPod.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
			_, err = getTransitionTimeForPodConditionWithStatus(pod, v1.PodReadyToStartContainers, true)
			return err
		}).WithPolling(500*time.Millisecond).WithTimeout(10*time.Second).Should(gomega.Succeed(),
			"PodReadyToStartContainers condition should be set to True within %v", 10*time.Second)

		ginkgo.By("Verifying condition timing: it should be set quickly, before the image pull delay")
		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, testPod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)

		conditionTime, err := getTransitionTimeForPodConditionWithStatus(pod, v1.PodReadyToStartContainers, true)
		framework.ExpectNoError(err)

		ginkgo.By("Waiting for pod to eventually become Running after image pull")
		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, testPod))

		ginkgo.By("Verifying condition was set before image pull completed")
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, testPod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)

		podReadyTime, err := getTransitionTimeForPodConditionWithStatus(pod, v1.PodReady, true)
		framework.ExpectNoError(err)

		gomega.Expect(conditionTime.Before(podReadyTime)).To(gomega.BeTrueBecause(
			"PodReadyToStartContainers was set at %v but PodReady was set at %v - condition should be set before image pull completes",
			conditionTime, podReadyTime))
	}
}