Mirror of https://github.com/kubernetes/kubernetes.git, synced 2026-02-03 20:40:26 -05:00
Merge pull request #136341 from Karthik-K-N/remove-deprecated-methods
Remove usage of deprecated functions from ktesting package
Commit eba75de156
26 changed files with 116 additions and 128 deletions
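The change is mechanical throughout: package-level ktesting helpers that took a TContext as their first argument (WithCancel, WithTimeout, WithClients, WithRESTConfig, WithStep, WithError, WithLogger, WithoutCancel, Step, Eventually, Consistently) are replaced by the equivalent methods on TContext itself. Below is a minimal sketch of the before/after shape, assuming only the API surface visible in this diff; the fakeQueue type and the test name are hypothetical stand-ins.

package example

import (
    "testing"
    "time"

    "github.com/onsi/gomega"
    "k8s.io/kubernetes/test/utils/ktesting"
)

// fakeQueue is a hypothetical stand-in for the work queues polled in the real tests.
type fakeQueue struct{ items []string }

func (q *fakeQueue) Len() int { return len(q.items) }

func TestMigrationPattern(t *testing.T) {
    queue := &fakeQueue{items: []string{"one"}}

    // Old style (deprecated package-level helpers):
    //   tCtx := ktesting.Init(t)
    //   tCtx = ktesting.WithCancel(tCtx)
    //   ktesting.Step(tCtx, "check queue", func(tCtx ktesting.TContext) {
    //       ktesting.Eventually(tCtx, func(tCtx ktesting.TContext) int { return queue.Len() }).
    //           WithTimeout(time.Second).Should(gomega.Equal(1))
    //   })

    // New style (methods on TContext), the pattern applied in every hunk below:
    tCtx := ktesting.Init(t)
    tCtx = tCtx.WithCancel()
    tCtx.Step("check queue", func(tCtx ktesting.TContext) {
        tCtx.Eventually(queue.Len).
            WithTimeout(time.Second).
            WithPolling(10 * time.Millisecond).
            Should(gomega.Equal(1))
    })
}

Note that in several of the tests below the ktesting.WithCancel call right after Init is simply dropped with no replacement rather than rewritten.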
@@ -1631,7 +1631,7 @@ func TestDoNotAdoptOrCreateIfBeingDeletedRace(t *testing.T) {
}

func TestReplicaSetAvailabilityCheck(t *testing.T) {
- _, ctx := ktesting.NewTestContext(t)
+ tCtx := ktesting.Init(t)

labelMap := map[string]string{"foo": "bar"}
rs := newReplicaSet(4, labelMap)
@@ -1665,7 +1665,7 @@ func TestReplicaSetAvailabilityCheck(t *testing.T) {
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl

- err := manager.syncReplicaSet(ctx, GetKey(rs, t))
+ err := manager.syncReplicaSet(tCtx, GetKey(rs, t))
if err != nil {
t.Fatal(err)
}
@@ -1703,9 +1703,7 @@ func TestReplicaSetAvailabilityCheck(t *testing.T) {
}

// RS should be re-queued after 700ms to recompute .status.availableReplicas (200ms extra for the test).
- ktesting.Eventually(ctx, func(tCtx ktesting.TContext) int {
- return manager.queue.Len()
- }).WithTimeout(900*time.Millisecond).
+ tCtx.Eventually(manager.queue.Len).WithTimeout(900*time.Millisecond).
WithPolling(10*time.Millisecond).
Should(gomega.Equal(1), " RS should be re-queued to recompute .status.availableReplicas")

@@ -438,7 +438,6 @@ func TestSyncHandler(t *testing.T) {
// Run sequentially because of global logging and global metrics.
t.Run(tc.name, func(t *testing.T) {
tCtx := ktesting.Init(t)
- tCtx = ktesting.WithCancel(tCtx)

var objects []runtime.Object
for _, pod := range tc.pods {
@@ -533,7 +532,6 @@ func TestSyncHandler(t *testing.T) {

func TestResourceClaimTemplateEventHandler(t *testing.T) {
tCtx := ktesting.Init(t)
- tCtx = ktesting.WithCancel(tCtx)

fakeKubeClient := createTestClient()
informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())
@@ -631,33 +629,33 @@ func TestResourceClaimTemplateEventHandler(t *testing.T) {
// - fake-1 in the tmp namespace
_, err = podClient.Create(tCtx, testPodWithResource, metav1.CreateOptions{})
_, err1 := podTmpClient.Create(tCtx, makePod("fake-1", tmpNamespace, "uidpod2", *makePodResourceClaim(podResourceClaimName, templateName)), metav1.CreateOptions{})
- ktesting.Step(tCtx, "create pod", func(tCtx ktesting.TContext) {
+ tCtx.Step("create pod", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
tCtx.ExpectNoError(err1)
expectQueue(tCtx, []string{testPodKey, podKeyPrefix + tmpNamespace + "/" + "fake-1"}, []string{testNamespace + "/" + templateName, tmpNamespace + "/" + templateName})
})

// The item has been forgotten and marked as done in the workqueue,so queue is nil
- ktesting.Step(tCtx, "expect queue is nil", func(tCtx ktesting.TContext) {
+ tCtx.Step("expect queue is nil", func(tCtx ktesting.TContext) {
expectQueue(tCtx, []string{}, []string{testNamespace + "/" + templateName, tmpNamespace + "/" + templateName})
})

// After create claim template,queue should have test pod key
_, err = claimTemplateClient.Create(tCtx, template, metav1.CreateOptions{})
- ktesting.Step(tCtx, "create claim template after pod backoff", func(tCtx ktesting.TContext) {
+ tCtx.Step("create claim template after pod backoff", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
expectQueue(tCtx, []string{testPodKey}, []string{testNamespace + "/" + templateName, tmpNamespace + "/" + templateName})
})

// The item has been forgotten and marked as done in the workqueue,so queue is nil
- ktesting.Step(tCtx, "expect queue is nil", func(tCtx ktesting.TContext) {
+ tCtx.Step("expect queue is nil", func(tCtx ktesting.TContext) {
expectQueue(tCtx, []string{}, []string{testNamespace + "/" + templateName, tmpNamespace + "/" + templateName})
})

// After create tmp namespace claim template,queue should have fake pod key
TmpNamespaceTemplate := makeTemplate(templateName, "tmp", className, nil)
_, err = claimTemplateTmpClient.Create(tCtx, TmpNamespaceTemplate, metav1.CreateOptions{})
- ktesting.Step(tCtx, "create claim template in tmp namespace after pod backoff in test namespace", func(tCtx ktesting.TContext) {
+ tCtx.Step("create claim template in tmp namespace after pod backoff in test namespace", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
expectQueue(tCtx, []string{podKeyPrefix + tmpNamespace + "/" + "fake-1"}, []string{testNamespace + "/" + templateName, tmpNamespace + "/" + templateName})
})
@@ -666,7 +664,6 @@ func TestResourceClaimTemplateEventHandler(t *testing.T) {

func TestResourceClaimEventHandler(t *testing.T) {
tCtx := ktesting.Init(t)
- tCtx = ktesting.WithCancel(tCtx)

fakeKubeClient := createTestClient()
informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())
@@ -725,7 +722,7 @@ func TestResourceClaimEventHandler(t *testing.T) {

_, err = claimClient.Create(tCtx, testClaim, metav1.CreateOptions{})
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "false", AdminAccess: "false", Source: ""}, 1)
- ktesting.Step(tCtx, "create claim", func(tCtx ktesting.TContext) {
+ tCtx.Step("create claim", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Eventually(tCtx)
expectQueue(tCtx, []string{testClaimKey})
@@ -734,7 +731,7 @@ func TestResourceClaimEventHandler(t *testing.T) {
modifiedClaim := testClaim.DeepCopy()
modifiedClaim.Labels = map[string]string{"foo": "bar"}
_, err = claimClient.Update(tCtx, modifiedClaim, metav1.UpdateOptions{})
- ktesting.Step(tCtx, "modify claim", func(tCtx ktesting.TContext) {
+ tCtx.Step("modify claim", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Consistently(tCtx)
expectQueue(tCtx, []string{testClaimKey})
@@ -743,7 +740,7 @@ func TestResourceClaimEventHandler(t *testing.T) {
_, err = claimClient.Update(tCtx, testClaimAllocated, metav1.UpdateOptions{})
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "false", AdminAccess: "false", Source: ""}, -1)
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "true", AdminAccess: "false", Source: ""}, 1)
- ktesting.Step(tCtx, "allocate claim", func(tCtx ktesting.TContext) {
+ tCtx.Step("allocate claim", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Eventually(tCtx)
expectQueue(tCtx, []string{testClaimKey})
@@ -752,7 +749,7 @@ func TestResourceClaimEventHandler(t *testing.T) {
modifiedClaim = testClaimAllocated.DeepCopy()
modifiedClaim.Labels = map[string]string{"foo": "bar2"}
_, err = claimClient.Update(tCtx, modifiedClaim, metav1.UpdateOptions{})
- ktesting.Step(tCtx, "modify claim", func(tCtx ktesting.TContext) {
+ tCtx.Step("modify claim", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Consistently(tCtx)
expectQueue(tCtx, []string{testClaimKey})
@@ -762,7 +759,7 @@ func TestResourceClaimEventHandler(t *testing.T) {
otherClaimAllocated.Name += "2"
_, err = claimClient.Create(tCtx, otherClaimAllocated, metav1.CreateOptions{})
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "true", AdminAccess: "false", Source: ""}, 1)
- ktesting.Step(tCtx, "create allocated claim", func(tCtx ktesting.TContext) {
+ tCtx.Step("create allocated claim", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Eventually(tCtx)
expectQueue(tCtx, []string{testClaimKey + "2"})
@@ -771,7 +768,7 @@ func TestResourceClaimEventHandler(t *testing.T) {
_, err = claimClient.Update(tCtx, testClaim, metav1.UpdateOptions{})
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "false", AdminAccess: "false", Source: ""}, 1)
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "true", AdminAccess: "false", Source: ""}, -1)
- ktesting.Step(tCtx, "deallocate claim", func(tCtx ktesting.TContext) {
+ tCtx.Step("deallocate claim", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Eventually(tCtx)
expectQueue(tCtx, []string{testClaimKey})
@@ -779,7 +776,7 @@ func TestResourceClaimEventHandler(t *testing.T) {

err = claimClient.Delete(tCtx, testClaim.Name, metav1.DeleteOptions{})
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "false", AdminAccess: "false", Source: ""}, -1)
- ktesting.Step(tCtx, "delete deallocated claim", func(tCtx ktesting.TContext) {
+ tCtx.Step("delete deallocated claim", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Eventually(tCtx)
expectQueue(tCtx, []string{})
@@ -787,7 +784,7 @@ func TestResourceClaimEventHandler(t *testing.T) {

err = claimClient.Delete(tCtx, otherClaimAllocated.Name, metav1.DeleteOptions{})
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "true", AdminAccess: "false", Source: ""}, -1)
- ktesting.Step(tCtx, "delete allocated claim", func(tCtx ktesting.TContext) {
+ tCtx.Step("delete allocated claim", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Eventually(tCtx)
expectQueue(tCtx, []string{})
@@ -795,7 +792,7 @@ func TestResourceClaimEventHandler(t *testing.T) {

_, err = claimClient.Create(tCtx, templatedTestClaimWithAdmin, metav1.CreateOptions{})
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "false", AdminAccess: "true", Source: "resource_claim_template"}, 1)
- ktesting.Step(tCtx, "create claim with admin access", func(tCtx ktesting.TContext) {
+ tCtx.Step("create claim with admin access", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Eventually(tCtx)
})
@@ -803,7 +800,7 @@ func TestResourceClaimEventHandler(t *testing.T) {
modifiedClaim = templatedTestClaimWithAdmin.DeepCopy()
modifiedClaim.Labels = map[string]string{"foo": "bar"}
_, err = claimClient.Update(tCtx, modifiedClaim, metav1.UpdateOptions{})
- ktesting.Step(tCtx, "modify claim", func(tCtx ktesting.TContext) {
+ tCtx.Step("modify claim", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Consistently(tCtx)
})
@@ -811,7 +808,7 @@ func TestResourceClaimEventHandler(t *testing.T) {
_, err = claimClient.Update(tCtx, templatedTestClaimWithAdminAllocated, metav1.UpdateOptions{})
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "false", AdminAccess: "true", Source: "resource_claim_template"}, -1)
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "true", AdminAccess: "true", Source: "resource_claim_template"}, 1)
- ktesting.Step(tCtx, "allocate claim with admin access", func(tCtx ktesting.TContext) {
+ tCtx.Step("allocate claim with admin access", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Eventually(tCtx)
})
@@ -819,7 +816,7 @@ func TestResourceClaimEventHandler(t *testing.T) {
modifiedClaim = templatedTestClaimWithAdminAllocated.DeepCopy()
modifiedClaim.Labels = map[string]string{"foo": "bar2"}
_, err = claimClient.Update(tCtx, modifiedClaim, metav1.UpdateOptions{})
- ktesting.Step(tCtx, "modify claim", func(tCtx ktesting.TContext) {
+ tCtx.Step("modify claim", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Consistently(tCtx)
})
@@ -828,7 +825,7 @@ func TestResourceClaimEventHandler(t *testing.T) {
otherClaimAllocated.Name += "2"
_, err = claimClient.Create(tCtx, otherClaimAllocated, metav1.CreateOptions{})
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "true", AdminAccess: "true", Source: "resource_claim_template"}, 1)
- ktesting.Step(tCtx, "create allocated claim with admin access", func(tCtx ktesting.TContext) {
+ tCtx.Step("create allocated claim with admin access", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Eventually(tCtx)
})
@@ -836,28 +833,28 @@ func TestResourceClaimEventHandler(t *testing.T) {
_, err = claimClient.Update(tCtx, templatedTestClaimWithAdmin, metav1.UpdateOptions{})
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "false", AdminAccess: "true", Source: "resource_claim_template"}, 1)
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "true", AdminAccess: "true", Source: "resource_claim_template"}, -1)
- ktesting.Step(tCtx, "deallocate claim with admin access", func(tCtx ktesting.TContext) {
+ tCtx.Step("deallocate claim with admin access", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Eventually(tCtx)
})

err = claimClient.Delete(tCtx, templatedTestClaimWithAdmin.Name, metav1.DeleteOptions{})
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "false", AdminAccess: "true", Source: "resource_claim_template"}, -1)
- ktesting.Step(tCtx, "delete deallocated claim with admin access", func(tCtx ktesting.TContext) {
+ tCtx.Step("delete deallocated claim with admin access", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Eventually(tCtx)
})

err = claimClient.Delete(tCtx, otherClaimAllocated.Name, metav1.DeleteOptions{})
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "true", AdminAccess: "true", Source: "resource_claim_template"}, -1)
- ktesting.Step(tCtx, "delete allocated claim with admin access", func(tCtx ktesting.TContext) {
+ tCtx.Step("delete allocated claim with admin access", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Eventually(tCtx)
})

_, err = claimClient.Create(tCtx, extendedTestClaim, metav1.CreateOptions{})
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "false", AdminAccess: "false", Source: "extended_resource"}, 1)
- ktesting.Step(tCtx, "create extended resource claim", func(tCtx ktesting.TContext) {
+ tCtx.Step("create extended resource claim", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Eventually(tCtx)
})
@@ -865,7 +862,7 @@ func TestResourceClaimEventHandler(t *testing.T) {
_, err = claimClient.Update(tCtx, extendedTestClaimAllocated, metav1.UpdateOptions{})
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "false", AdminAccess: "false", Source: "extended_resource"}, -1)
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "true", AdminAccess: "false", Source: "extended_resource"}, 1)
- ktesting.Step(tCtx, "allocate extended resource claim", func(tCtx ktesting.TContext) {
+ tCtx.Step("allocate extended resource claim", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Eventually(tCtx)
})
@@ -873,14 +870,14 @@ func TestResourceClaimEventHandler(t *testing.T) {
_, err = claimClient.Update(tCtx, extendedTestClaim, metav1.UpdateOptions{})
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "false", AdminAccess: "false", Source: "extended_resource"}, 1)
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "true", AdminAccess: "false", Source: "extended_resource"}, -1)
- ktesting.Step(tCtx, "deallocate extended resource claim", func(tCtx ktesting.TContext) {
+ tCtx.Step("deallocate extended resource claim", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Eventually(tCtx)
})

err = claimClient.Delete(tCtx, extendedTestClaim.Name, metav1.DeleteOptions{})
em = em.withUpdates(resourceclaimmetrics.NumResourceClaimLabels{Allocated: "false", AdminAccess: "false", Source: "extended_resource"}, -1)
- ktesting.Step(tCtx, "delete extended resource claim", func(tCtx ktesting.TContext) {
+ tCtx.Step("delete extended resource claim", func(tCtx ktesting.TContext) {
tCtx.ExpectNoError(err)
em.Eventually(tCtx)
})
@@ -1490,7 +1487,6 @@ func TestEnqueuePodExtendedResourceClaims(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DRAExtendedResource, test.featureGateEnabled)

tCtx := ktesting.Init(t)
- tCtx = ktesting.WithCancel(tCtx)

fakeKubeClient := createTestClient()
informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())
@@ -911,11 +911,11 @@ func TestStaleOwnerRefOnScaleup(t *testing.T) {
}

func TestStatefulSetAvailabilityCheck(t *testing.T) {
- _, ctx := ktesting.NewTestContext(t)
+ tCtx := ktesting.Init(t)

set := setMinReadySeconds(newStatefulSet(4), int32(5)) // 5 seconds
set = setupPodManagementPolicy(apps.ParallelPodManagement, set)
- ssc, _, om, _ := newFakeStatefulSetController(ctx, set)
+ ssc, _, om, _ := newFakeStatefulSetController(tCtx, set)
if err := om.setsIndexer.Add(set); err != nil {
t.Fatalf("could not add set to the cache: %v", err)
}
@@ -939,7 +939,7 @@ func TestStatefulSetAvailabilityCheck(t *testing.T) {
t.Fatalf("%d: %v", i, err)
}
}
- err := ssc.syncStatefulSet(ctx, set, pods)
+ err := ssc.syncStatefulSet(tCtx, set, pods)
if err != nil {
t.Fatal(err)
}
@@ -963,9 +963,7 @@ func TestStatefulSetAvailabilityCheck(t *testing.T) {
}

// RS should be re-queued after 700ms to recompute .status.availableReplicas (200ms extra for the test).
- ktesting.Eventually(ctx, func(tCtx ktesting.TContext) int {
- return ssc.queue.Len()
- }).WithTimeout(900*time.Millisecond).
+ tCtx.Eventually(ssc.queue.Len).WithTimeout(900*time.Millisecond).
WithPolling(10*time.Millisecond).
Should(gomega.Equal(1), " StatefulSet should be re-queued to recompute .status.availableReplicas")
}
@@ -296,7 +296,7 @@ func TestGRPCConnUsableAfterIdle(t *testing.T) {
},
}

- callCtx := ktesting.WithTimeout(tCtx, 10*time.Second, "call timed out")
+ callCtx := tCtx.WithTimeout(10*time.Second, "call timed out")
_, err = plugin.NodePrepareResources(callCtx, req)
tCtx.ExpectNoError(err, "NodePrepareResources")
}
@@ -105,7 +105,7 @@ func getFakeClient(t *testing.T, nodeName, driverName string, slice *resourceapi

func requireNoSlices(tCtx ktesting.TContext) {
tCtx.Helper()
- ktesting.Eventually(tCtx, func(tCtx ktesting.TContext) error {
+ tCtx.Eventually(func(tCtx ktesting.TContext) error {
slices, err := tCtx.Client().ResourceV1().ResourceSlices().List(tCtx, metav1.ListOptions{})
if err != nil {
return err
@@ -218,7 +218,7 @@ func TestRegistrationHandler(t *testing.T) {
if test.withClient {
fakeClient := getFakeClient(t, nodeName, test.driverName, getSlice("test-slice"))
client = fakeClient
- tCtx = ktesting.WithClients(tCtx, nil, nil, client, nil, nil)
+ tCtx = tCtx.WithClients(nil, nil, client, nil, nil)
}

// The DRAPluginManager wipes all slices at startup.
@@ -307,7 +307,7 @@ func TestConnectionHandling(t *testing.T) {

slice := getSlice(sliceName)
client := getFakeClient(t, nodeName, driverName, slice)
- tCtx = ktesting.WithClients(tCtx, nil, nil, client, nil, nil)
+ tCtx = tCtx.WithClients(nil, nil, client, nil, nil)

// The handler wipes all slices at startup.
draPlugins := NewDRAPluginManager(tCtx, client, getFakeNode, &mockStreamHandler{}, test.delay)
@@ -29,25 +29,23 @@ import (
)

func TestConfigurationChannels(t *testing.T) {
- ctx := ktesting.Init(t)
- ctx = ktesting.WithCancel(ctx)
- defer ctx.Cancel("TestConfigurationChannels completed")
+ tCtx := ktesting.Init(t)
+ defer tCtx.Cancel("TestConfigurationChannels completed")

mux := newMux(nil)
- channelOne := mux.ChannelWithContext(ctx, "one")
- if channelOne != mux.ChannelWithContext(ctx, "one") {
+ channelOne := mux.ChannelWithContext(tCtx, "one")
+ if channelOne != mux.ChannelWithContext(tCtx, "one") {
t.Error("Didn't get the same muxuration channel back with the same name")
}
- channelTwo := mux.ChannelWithContext(ctx, "two")
+ channelTwo := mux.ChannelWithContext(tCtx, "two")
if channelOne == channelTwo {
t.Error("Got back the same muxuration channel for different names")
}
}

func TestMergeInvoked(t *testing.T) {
- ctx := ktesting.Init(t)
- ctx = ktesting.WithCancel(ctx)
- defer ctx.Cancel("TestMergeInvoked completed")
+ tCtx := ktesting.Init(t)
+ defer tCtx.Cancel("TestMergeInvoked completed")

const expectedSource = "one"
done := make(chan interface{})
@@ -65,13 +63,13 @@ func TestMergeInvoked(t *testing.T) {

mux := newMux(&merger)

- mux.ChannelWithContext(ctx, expectedSource) <- fakeUpdate(expectedSource)
+ mux.ChannelWithContext(tCtx, expectedSource) <- fakeUpdate(expectedSource)

// Wait for Merge call.
select {
case <-done:
// Test complete.
- case <-ctx.Done():
+ case <-tCtx.Done():
t.Fatal("Test context canceled before completion")
}
}
@@ -84,9 +82,8 @@ func (f mergeFunc) Merge(ctx context.Context, source string, update sourceUpdate
}

func TestSimultaneousMerge(t *testing.T) {
- ctx := ktesting.Init(t)
- ctx = ktesting.WithCancel(ctx)
- defer ctx.Cancel("TestSimultaneousMerge completed")
+ tCtx := ktesting.Init(t)
+ defer tCtx.Cancel("TestSimultaneousMerge completed")

ch := make(chan bool, 2)
mux := newMux(mergeFunc(func(ctx context.Context, source string, update sourceUpdate) error {
@@ -96,8 +93,8 @@ func TestSimultaneousMerge(t *testing.T) {
ch <- true
return nil
}))
- source := mux.ChannelWithContext(ctx, "one")
- source2 := mux.ChannelWithContext(ctx, "two")
+ source := mux.ChannelWithContext(tCtx, "one")
+ source2 := mux.ChannelWithContext(tCtx, "two")
source <- fakeUpdate("one")
source2 <- fakeUpdate("two")
<-ch
@@ -139,7 +139,7 @@ func TestTCPPortExhaustion(t *testing.T) {
}
t.Logf("Adding %d pods with %d containers each in %v", numTestPods, numContainers, time.Since(now))

- ctx := ktesting.WithTimeout(tCtx, 59*time.Second, "timeout 59 Second")
+ ctx := tCtx.WithTimeout(59*time.Second, "timeout 59 Second")
defer ctx.Cancel("TestTCPPortExhaustion completed")
var wg sync.WaitGroup

@@ -586,7 +586,7 @@ func testCalculateResourceAllocatableRequest(tCtx ktesting.TContext) {
// newTestDRAManager creates a DefaultDRAManager for testing purposes.
// Only usable in a syntest bubble.
func newTestDRAManager(tCtx ktesting.TContext, objects ...apiruntime.Object) *dynamicresources.DefaultDRAManager {
- tCtx = ktesting.WithCancel(tCtx)
+ tCtx = tCtx.WithCancel()
client := fake.NewClientset(objects...)
informerFactory := informers.NewSharedInformerFactory(client, 0)
resourceSliceTrackerOpts := tracker.Options{
@@ -1301,7 +1301,7 @@ func TestFrameworkHandler_IterateOverWaitingPods(t *testing.T) {
// Wait all pods in waitSchedulingPods to be scheduled.
wg.Wait()

- utiltesting.Eventually(tCtx, func(utiltesting.TContext) sets.Set[string] {
+ tCtx.Eventually(func(utiltesting.TContext) sets.Set[string] {
// Ensure that all waitingPods in scheduler can be obtained from any profiles.
actualPodNamesInWaitingPods := sets.New[string]()
for _, schedFramework := range scheduler.Profiles {
@@ -335,27 +335,27 @@ func TestRestore(t *testing.T) {
newObj := makeObj("pvc1", "5", "")

// Restore object that doesn't exist
- ktesting.Step(tCtx, "empty cache", func(tCtx ktesting.TContext) {
+ tCtx.Step("empty cache", func(tCtx ktesting.TContext) {
cache.Restore("nothing")
events.verifyAndFlush(tCtx, nil)
})

// Add old object to cache.
- ktesting.Step(tCtx, "initial update", func(tCtx ktesting.TContext) {
+ tCtx.Step("initial update", func(tCtx ktesting.TContext) {
informer.add(oldObj)
verify(tCtx, cache, oldObj.GetName(), oldObj, oldObj)
events.verifyAndFlush(tCtx, []event{{What: "add", Obj: oldObj}})
})

// Restore the same object.
- ktesting.Step(tCtx, "initial Restore", func(tCtx ktesting.TContext) {
+ tCtx.Step("initial Restore", func(tCtx ktesting.TContext) {
cache.Restore(oldObj.GetName())
verify(tCtx, cache, oldObj.GetName(), oldObj, oldObj)
events.verifyAndFlush(tCtx, nil)
})

// Assume new object.
- ktesting.Step(tCtx, "Assume", func(tCtx ktesting.TContext) {
+ tCtx.Step("Assume", func(tCtx ktesting.TContext) {
if err := cache.Assume(newObj); err != nil {
tCtx.Fatalf("Assume() returned error %v", err)
}
@@ -364,7 +364,7 @@ func TestRestore(t *testing.T) {
})

// Restore the same object.
- ktesting.Step(tCtx, "second Restore", func(tCtx ktesting.TContext) {
+ tCtx.Step("second Restore", func(tCtx ktesting.TContext) {
cache.Restore(oldObj.GetName())
verify(tCtx, cache, oldObj.GetName(), oldObj, oldObj)
events.verifyAndFlush(tCtx, []event{{What: "update", OldObj: newObj, Obj: oldObj}})
@@ -380,49 +380,49 @@ func TestEvents(t *testing.T) {

// Add old object to cache.
informer.add(oldObj)
- verify(ktesting.WithStep(tCtx, "after initial update"), cache, key, oldObj, oldObj)
+ verify(tCtx.WithStep("after initial update"), cache, key, oldObj, oldObj)

// Receive initial list.
var events mockEventHandler
cache.AddEventHandler(&events)
- events.verifyAndFlush(ktesting.WithStep(tCtx, "initial list"), []event{{What: "add", Obj: oldObj, InitialList: true}})
+ events.verifyAndFlush(tCtx.WithStep("initial list"), []event{{What: "add", Obj: oldObj, InitialList: true}})

// Update object.
- ktesting.Step(tCtx, "initial update", func(tCtx ktesting.TContext) {
+ tCtx.Step("initial update", func(tCtx ktesting.TContext) {
informer.update(newObj)
verify(tCtx, cache, key, newObj, newObj)
events.verifyAndFlush(tCtx, []event{{What: "update", OldObj: oldObj, Obj: newObj}})
})

// Some error cases (don't occur in practice).
- ktesting.Step(tCtx, "nop add", func(tCtx ktesting.TContext) {
+ tCtx.Step("nop add", func(tCtx ktesting.TContext) {
informer.add(1)
verify(tCtx, cache, key, newObj, newObj)
events.verifyAndFlush(tCtx, nil)
})
- ktesting.Step(tCtx, "nil add", func(tCtx ktesting.TContext) {
+ tCtx.Step("nil add", func(tCtx ktesting.TContext) {
informer.add(nil)
verify(tCtx, cache, key, newObj, newObj)
events.verifyAndFlush(tCtx, nil)
})
- ktesting.Step(tCtx, "nop update", func(tCtx ktesting.TContext) {
+ tCtx.Step("nop update", func(tCtx ktesting.TContext) {
informer.update(oldObj)
events.verifyAndFlush(tCtx, nil)
verify(tCtx, cache, key, newObj, newObj)
})
- ktesting.Step(tCtx, "nil update", func(tCtx ktesting.TContext) {
+ tCtx.Step("nil update", func(tCtx ktesting.TContext) {
informer.update(nil)
verify(tCtx, cache, key, newObj, newObj)
events.verifyAndFlush(tCtx, nil)
})
- ktesting.Step(tCtx, "nop delete", func(tCtx ktesting.TContext) {
+ tCtx.Step("nop delete", func(tCtx ktesting.TContext) {
informer.delete(nil)
verify(tCtx, cache, key, newObj, newObj)
events.verifyAndFlush(tCtx, nil)
})

// Delete object.
- ktesting.Step(tCtx, "delete", func(tCtx ktesting.TContext) {
+ tCtx.Step("delete", func(tCtx ktesting.TContext) {
informer.delete(oldObj)
events.verifyAndFlush(tCtx, []event{{What: "delete", Obj: newObj}})
_, err := cache.Get(key)
@@ -504,7 +504,7 @@ func TestEventHandlerConcurrency(t *testing.T) {
handlers[0].cache = cache

// Each add blocks until this gets cancelled.
- tCancelCtx := ktesting.WithCancel(tCtx)
+ tCancelCtx := tCtx.WithCancel()
var wg sync.WaitGroup

for i := range handlers {
@@ -553,7 +553,7 @@ func TestListNoIndexer(t *testing.T) {
}

// List them
- verifyList(ktesting.WithStep(tCtx, "after add"), cache, objs, "")
+ verifyList(tCtx.WithStep("after add"), cache, objs, "")

// Update an object.
updatedObj := makeObj("test-pvc3", "2", "")
@@ -561,7 +561,7 @@ func TestListNoIndexer(t *testing.T) {
informer.update(updatedObj)

// List them
- verifyList(ktesting.WithStep(tCtx, "after update"), cache, objs, "")
+ verifyList(tCtx.WithStep("after update"), cache, objs, "")

// Delete a PV
deletedObj := objs[7]
@@ -569,7 +569,7 @@ func TestListNoIndexer(t *testing.T) {
informer.delete(deletedObj)

// List them
- verifyList(ktesting.WithStep(tCtx, "after delete"), cache, objs, "")
+ verifyList(tCtx.WithStep("after delete"), cache, objs, "")
}

func TestListWithIndexer(t *testing.T) {
@@ -598,7 +598,7 @@ func TestListWithIndexer(t *testing.T) {
}

// List them
- verifyList(ktesting.WithStep(tCtx, "after add"), cache, objs, objs[0])
+ verifyList(tCtx.WithStep("after add"), cache, objs, objs[0])

// Update an object.
updatedObj := makeObj("test-pvc3", "2", ns)
@@ -606,7 +606,7 @@ func TestListWithIndexer(t *testing.T) {
informer.update(updatedObj)

// List them
- verifyList(ktesting.WithStep(tCtx, "after update"), cache, objs, objs[0])
+ verifyList(tCtx.WithStep("after update"), cache, objs, objs[0])

// Delete a PV
deletedObj := objs[7]
@@ -614,5 +614,5 @@ func TestListWithIndexer(t *testing.T) {
informer.delete(deletedObj)

// List them
- verifyList(ktesting.WithStep(tCtx, "after delete"), cache, objs, objs[0])
+ verifyList(tCtx.WithStep("after delete"), cache, objs, objs[0])
}
@@ -55,7 +55,6 @@ import (
e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
dratest "k8s.io/kubernetes/test/integration/dra"
- "k8s.io/kubernetes/test/utils/ktesting"
admissionapi "k8s.io/pod-security-admission/api"
"k8s.io/utils/ptr"
)
@@ -711,7 +710,7 @@ var _ = framework.SIGDescribe("node")(framework.WithLabel("DRA"), func() {

// We expect one ResourceSlice per node from the driver.
getSlices := oldDriver.NewGetSlices()
- ktesting.Eventually(tCtx, getSlices).Should(gomega.HaveField("Items", gomega.HaveLen(len(nodes.NodeNames))))
+ tCtx.Eventually(getSlices).Should(gomega.HaveField("Items", gomega.HaveLen(len(nodes.NodeNames))))
initialSlices := getSlices(tCtx)

// Same driver name, different socket paths because of rolling update.
@@ -752,7 +751,7 @@ var _ = framework.SIGDescribe("node")(framework.WithLabel("DRA"), func() {

// We expect one ResourceSlice per node from the driver.
getSlices := oldDriver.NewGetSlices()
- ktesting.Eventually(tCtx, getSlices).Should(gomega.HaveField("Items", gomega.HaveLen(len(nodes.NodeNames))))
+ tCtx.Eventually(getSlices).Should(gomega.HaveField("Items", gomega.HaveLen(len(nodes.NodeNames))))
initialSlices := getSlices(tCtx)

// Same driver name, different socket paths because of rolling update.
@@ -795,7 +794,7 @@ var _ = framework.SIGDescribe("node")(framework.WithLabel("DRA"), func() {

// Collect set of resource slices for that driver.
listSlices := oldDriver.NewGetSlices()
- ktesting.Eventually(tCtx, listSlices).Should(gomega.HaveField("Items", gomega.Not(gomega.BeEmpty())), "driver should have published ResourceSlices, got none")
+ tCtx.Eventually(listSlices).Should(gomega.HaveField("Items", gomega.Not(gomega.BeEmpty())), "driver should have published ResourceSlices, got none")
oldSlices := listSlices(tCtx)
if len(oldSlices.Items) == 0 {
framework.Fail("driver should have published ResourceSlices, got none")
@@ -73,7 +73,7 @@ func TestApply(t *testing.T) {
tCtx.Log("Stopping the apiserver...")
server.TearDownFn()
})
- tCtx = ktesting.WithRESTConfig(tCtx, server.ClientConfig)
+ tCtx = tCtx.WithRESTConfig(server.ClientConfig)

// More sub-tests could be added here. Currently there's only one.
tCtx.Run("optional-list-map-key", testOptionalListMapKey)
@@ -599,7 +599,7 @@ func mustSetupCluster(tCtx ktesting.TContext, config *config.KubeSchedulerConfig
// Cleanup will be in reverse order: first the clients by canceling the
// child context (happens automatically), then the server.
tCtx.Cleanup(server.TearDownFn)
- tCtx = ktesting.WithCancel(tCtx)
+ tCtx = tCtx.WithCancel()

// TODO: client connection configuration, such as QPS or Burst is configurable in theory, this could be derived from the `config`, need to
// support this when there is any testcase that depends on such configuration.
@@ -616,7 +616,7 @@ func mustSetupCluster(tCtx ktesting.TContext, config *config.KubeSchedulerConfig
}
}

- tCtx = ktesting.WithRESTConfig(tCtx, cfg)
+ tCtx = tCtx.WithRESTConfig(cfg)

// Not all config options will be effective but only those mostly related with scheduler performance will
// be applied to start a scheduler, most of them are defined in `scheduler.schedulerOptions`.
@@ -198,7 +198,7 @@ func (op *createResourceDriverOp) run(tCtx ktesting.TContext, draManager framewo
numSlices++
}

- ktesting.Eventually(tCtx, func(tCtx ktesting.TContext) int {
+ tCtx.Eventually(func(tCtx ktesting.TContext) int {
slices, err := draManager.ResourceSlices().ListWithDeviceTaintRules()
tCtx.ExpectNoError(err, "list ResourceSlices")
return len(slices)
@@ -278,7 +278,7 @@ func (op *allocResourceClaimsOp) run(tCtx ktesting.TContext) {
claims, err := tCtx.Client().ResourceV1().ResourceClaims(op.Namespace).List(tCtx, metav1.ListOptions{})
tCtx.ExpectNoError(err, "list claims")
tCtx.Logf("allocating %d ResourceClaims", len(claims.Items))
- tCtx = ktesting.WithCancel(tCtx)
+ tCtx = tCtx.WithCancel()
defer tCtx.Cancel("allocResourceClaimsOp.run is done")

// Track cluster state.
@@ -1164,7 +1164,7 @@ func setupTestCase(t testing.TB, tc *testCase, featureGates map[featuregate.Feat

// 30 minutes should be plenty enough even for the 5000-node tests.
timeout := 30 * time.Minute
- tCtx = ktesting.WithTimeout(tCtx, timeout, fmt.Sprintf("timed out after the %s per-test timeout", timeout))
+ tCtx = tCtx.WithTimeout(timeout, fmt.Sprintf("timed out after the %s per-test timeout", timeout))

if utilfeature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints) {
registerQHintMetrics()
@@ -1517,7 +1517,7 @@ func checkEmptyInFlightEvents() error {
}

func startCollectingMetrics(tCtx ktesting.TContext, collectorWG *sync.WaitGroup, podInformer coreinformers.PodInformer, mcc *metricsCollectorConfig, throughputErrorMargin float64, opIndex int, name string, namespaces []string, labelSelector map[string]string) (ktesting.TContext, []testDataCollector, error) {
- collectorCtx := ktesting.WithCancel(tCtx)
+ collectorCtx := tCtx.WithCancel()
workloadName := tCtx.Name()

// Clean up memory usage from the initial setup phase.
@@ -1617,7 +1617,7 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, topicName st
podInformer := informerFactory.Core().V1().Pods()

// Everything else started by this function gets stopped before it returns.
- tCtx = ktesting.WithCancel(tCtx)
+ tCtx = tCtx.WithCancel()

executor := WorkloadExecutor{
tCtx: tCtx,
@@ -2096,7 +2096,7 @@ func createPodsSteadily(tCtx ktesting.TContext, namespace string, podInformer co
return err
}
tCtx.Logf("creating pods in namespace %q for %s", namespace, cpo.Duration)
- tCtx = ktesting.WithTimeout(tCtx, cpo.Duration.Duration, fmt.Sprintf("the operation ran for the configured %s", cpo.Duration.Duration))
+ tCtx = tCtx.WithTimeout(cpo.Duration.Duration, fmt.Sprintf("the operation ran for the configured %s", cpo.Duration.Duration))

// Start watching pods in the namespace. Any pod which is seen as being scheduled
// gets deleted.
@@ -216,7 +216,7 @@ func TestRunOp(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
_, tCtx := ktesting.NewTestContext(t)
client := fake.NewSimpleClientset()
- tCtx = ktesting.WithClients(tCtx, nil, nil, client, nil, nil)
+ tCtx = tCtx.WithClients(nil, nil, client, nil, nil)

exec := &WorkloadExecutor{
tCtx: tCtx,
@@ -115,7 +115,7 @@ func mustSetupCluster(tCtx ktesting.TContext, config *config.KubeSchedulerConfig
// Cleanup will be in reverse order: first the clients by canceling the
// child context (happens automatically), then the server.
tCtx.Cleanup(server.TearDownFn)
- tCtx = ktesting.WithCancel(tCtx)
+ tCtx = tCtx.WithCancel()

// TODO: client connection configuration, such as QPS or Burst is configurable in theory, this could be derived from the `config`, need to
// support this when there is any testcase that depends on such configuration.
@@ -132,7 +132,7 @@ func mustSetupCluster(tCtx ktesting.TContext, config *config.KubeSchedulerConfig
}
}

- tCtx = ktesting.WithRESTConfig(tCtx, cfg)
+ tCtx = tCtx.WithRESTConfig(cfg)

// Not all config options will be effective but only those mostly related with scheduler performance will
// be applied to start a scheduler, most of them are defined in `scheduler.schedulerOptions`.
@@ -29,7 +29,7 @@ func TestAssert(t *testing.T) {
for name, tc := range map[string]testcase{
"eventually-timeout": {
cb: func(tCtx TContext) {
- Eventually(tCtx, func(tCtx TContext) int {
+ tCtx.Eventually(func(tCtx TContext) int {
// Canceling here is a nop.
tCtx.Cancel("testing")
return 0
@@ -46,7 +46,7 @@ func TestAssert(t *testing.T) {
},
"eventually-final": {
cb: func(tCtx TContext) {
- Eventually(tCtx, func(tCtx TContext) float64 {
+ tCtx.Eventually(func(tCtx TContext) float64 {
gomega.StopTrying("final error").Now()
return 0
}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
@@ -59,7 +59,7 @@ func TestAssert(t *testing.T) {
},
"eventually-error": {
cb: func(tCtx TContext) {
- Eventually(tCtx, func(tCtx TContext) float64 {
+ tCtx.Eventually(func(tCtx TContext) float64 {
tCtx.Fatal("some error")
return 0
}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
@@ -151,7 +151,7 @@ func TestAssert(t *testing.T) {
},
"eventually-success": {
cb: func(tCtx TContext) {
- Eventually(tCtx, func(tCtx TContext) float64 {
+ tCtx.Eventually(func(tCtx TContext) float64 {
return 1.0
}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
},
@@ -160,7 +160,7 @@ func TestAssert(t *testing.T) {
},
"eventually-retry": {
cb: func(tCtx TContext) {
- Eventually(tCtx, func(tCtx TContext) float64 {
+ tCtx.Eventually(func(tCtx TContext) float64 {
gomega.TryAgainAfter(time.Millisecond).Now()
return 0
}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
@@ -173,7 +173,7 @@ func TestAssert(t *testing.T) {
},
"consistently-timeout": {
cb: func(tCtx TContext) {
- Consistently(tCtx, func(tCtx TContext) float64 {
+ tCtx.Consistently(func(tCtx TContext) float64 {
// Canceling here is a nop.
tCtx.Cancel("testing")
return 0
@@ -190,7 +190,7 @@ func TestAssert(t *testing.T) {
},
"consistently-final": {
cb: func(tCtx TContext) {
- Consistently(tCtx, func(tCtx TContext) float64 {
+ tCtx.Consistently(func(tCtx TContext) float64 {
gomega.StopTrying("final error").Now()
tCtx.FailNow()
return 0
@@ -204,7 +204,7 @@ func TestAssert(t *testing.T) {
},
"consistently-error": {
cb: func(tCtx TContext) {
- Consistently(tCtx, func(tCtx TContext) float64 {
+ tCtx.Consistently(func(tCtx TContext) float64 {
tCtx.Fatal("some error")
return 0
}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
@@ -237,7 +237,7 @@ func TestAssert(t *testing.T) {
},
"consistently-success": {
cb: func(tCtx TContext) {
- Consistently(tCtx, func(tCtx TContext) float64 {
+ tCtx.Consistently(func(tCtx TContext) float64 {
return 1.0
}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
},
@@ -246,7 +246,7 @@ func TestAssert(t *testing.T) {
},
"consistently-retry": {
cb: func(tCtx TContext) {
- Consistently(tCtx, func(tCtx TContext) float64 {
+ tCtx.Consistently(func(tCtx TContext) float64 {
gomega.TryAgainAfter(time.Millisecond).Wrap(errors.New("intermittent error")).Now()
return 0
}).WithTimeout(time.Second).Should(gomega.Equal(1.0))
@@ -28,7 +28,7 @@ func TestWithError(t *testing.T) {
assert.Panics(t, func() {
tCtx := Init(t)
var err error
- _, finalize := WithError(tCtx, &err)
+ _, finalize := tCtx.WithError(&err)
defer finalize()

panic("pass me through")
@@ -99,7 +99,7 @@ second error`,
t.Run(name, func(t *testing.T) {
tCtx := Init(t)
err := normalErr
- tCtx, finalize := WithError(tCtx, &err)
+ tCtx, finalize := tCtx.WithError(&err)
func() {
defer finalize()
tc.cb(tCtx)
@@ -59,11 +59,11 @@ func TestInfo(t *testing.T) {

func TestWithStep(t *testing.T) {
tCtx := ktesting.Init(t)
- bake(ktesting.WithStep(tCtx, "bake cake"))
+ bake(tCtx.WithStep("bake cake"))
}

func bake(tCtx ktesting.TContext) {
- heatOven(ktesting.WithStep(tCtx, "set heat for baking"))
+ heatOven(tCtx.WithStep("set heat for baking"))
}

func heatOven(tCtx ktesting.TContext) {
@@ -58,7 +58,7 @@ func Step(tCtx TContext, step string, cb func(tCtx TContext)) {
// context variables and risk of using the wrong one.
func (tc *TC) Step(step string, cb func(tCtx TContext)) {
tc.Helper()
- cb(WithStep(tc, step))
+ cb(tc.WithStep(step))
}

// Value intercepts a search for the special "GINKGO_SPEC_CONTEXT" and
@@ -31,7 +31,7 @@ func TestStepContext(t *testing.T) {
for name, tc := range map[string]testcase{
"output": {
cb: func(tCtx TContext) {
- tCtx = WithStep(tCtx, "step")
+ tCtx = tCtx.WithStep("step")
tCtx.Log("Log", "a", "b", 42)
tCtx.Logf("Logf %s %s %d", "a", "b", 42)
tCtx.Error("Error", "a", "b", 42)
@@ -47,7 +47,7 @@ func TestStepContext(t *testing.T) {
},
"fatal": {
cb: func(tCtx TContext) {
- tCtx = WithStep(tCtx, "step")
+ tCtx = tCtx.WithStep("step")
tCtx.Fatal("Error", "a", "b", 42)
// not reached
tCtx.Log("Log")
@@ -58,7 +58,7 @@ func TestStepContext(t *testing.T) {
},
"fatalf": {
cb: func(tCtx TContext) {
- tCtx = WithStep(tCtx, "step")
+ tCtx = tCtx.WithStep("step")
tCtx.Fatalf("Error %s %s %d", "a", "b", 42)
// not reached
tCtx.Log("Log")
@@ -93,7 +93,7 @@ func TestProgressReport(t *testing.T) {

// This must use a real testing.T, otherwise Init doesn't initialize signal handling.
tCtx := Init(t)
- tCtx = WithStep(tCtx, "step")
+ tCtx = tCtx.WithStep("step")
removeReporter := tCtx.Value("GINKGO_SPEC_CONTEXT").(ginkgoReporter).AttachProgressReporter(func() string { return "hello world" })
defer removeReporter()
tCtx.Expect(tCtx.Value("some other key")).To(gomega.BeNil(), "value for unknown context value key")
@@ -158,7 +158,7 @@ func Init(tb TB, opts ...InitOption) TContext {
if cancelTimeout != nil {
tCtx.cancel = cancelTimeout
} else {
- tCtx = WithCancel(tCtx)
+ tCtx = tCtx.WithCancel()
tCtx.Cleanup(func() {
tCtx.Cancel(cleanupErr(tCtx.Name()).Error())
})
@@ -246,7 +246,7 @@ func (tc *TC) withTB(tb TB) TContext {
logger := newLogger(tb, false /* don't buffer logs in sub-test */)
tc.Context = klog.NewContext(tc.Context, logger)
}
- tc = WithCancel(tc)
+ tc = tc.WithCancel()
return tc
}

@@ -62,8 +62,8 @@ func TestCancelAutomatic(t *testing.T) {
func TestCancelCtx(t *testing.T) {
tCtx := ktesting.Init(t)
var discardLogger klog.Logger
- tCtx = ktesting.WithLogger(tCtx, discardLogger)
- tCtx = ktesting.WithRESTConfig(tCtx, new(rest.Config))
+ tCtx = tCtx.WithLogger(discardLogger)
+ tCtx = tCtx.WithRESTConfig(new(rest.Config))
baseCtx := tCtx

tCtx.Cleanup(func() {
@@ -112,7 +112,7 @@ func TestRun(t *testing.T) {
client := clientset.New(nil)
dynamic := dynamic.New(nil)
apiextensions := apiextensions.New(nil)
- tCtx = ktesting.WithClients(tCtx, cfg, mapper, client, dynamic, apiextensions)
+ tCtx = tCtx.WithClients(cfg, mapper, client, dynamic, apiextensions)

tCtx.Run("sub", func(tCtx ktesting.TContext) {
assert.Equal(t, cfg, tCtx.RESTConfig(), "RESTConfig")
@@ -90,9 +90,9 @@ func (c *Cmd) Start(tCtx ktesting.TContext) {
tCtx.Helper()
tCtx.Logf("running command %s: %s", c.Name, strings.Join(c.CommandLine, " "))
if c.KeepRunning {
- tCtx = ktesting.WithoutCancel(tCtx)
+ tCtx = tCtx.WithoutCancel()
}
- tCtx = ktesting.WithCancel(tCtx)
+ tCtx = tCtx.WithCancel()
c.cancel = tCtx.Cancel
c.cmd = exec.CommandContext(tCtx, c.CommandLine[0], c.CommandLine[1:]...)
c.gathering = false
@@ -436,7 +436,7 @@ func (c *Cluster) runComponentWithRetry(tCtx ktesting.TContext, cmd *Cmd) {
cmd.Start(tCtx)
c.running[KubeComponentName(cmd.Name)] = cmd
err := func() (finalErr error) {
- tCtx, finalize := ktesting.WithError(tCtx, &finalErr)
+ tCtx, finalize := tCtx.WithError(&finalErr)
defer finalize()
c.checkReadiness(tCtx, cmd)
return nil
@@ -456,7 +456,7 @@ func (c *Cluster) runComponentWithRetry(tCtx ktesting.TContext, cmd *Cmd) {

func (c *Cluster) checkReadiness(tCtx ktesting.TContext, cmd *Cmd) {
restConfig := c.LoadConfig(tCtx)
- tCtx = ktesting.WithRESTConfig(tCtx, restConfig)
+ tCtx = tCtx.WithRESTConfig(restConfig)
tCtx = tCtx.WithStep(fmt.Sprintf("wait for %s readiness", cmd.Name))

switch KubeComponentName(cmd.Name) {