DRA integration: increase timeout, II
When running with race detection enabled, several tests have recently suffered from timeouts, with no obvious commit that could be blamed for the slowdown. Let's use a common constant for this timeout and make it larger.
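The constant itself is not defined in this diff; it only switches call sites over to it. Assuming the tests live in a DRA integration test package, a minimal sketch of what such a definition could look like is shown below. The package name and the concrete value are hypothetical, chosen only to illustrate "larger than the previous 10 seconds":

package dra

import "time"

// schedulingTimeout is the shared upper bound the DRA integration tests use
// when waiting for the scheduler to allocate a claim. NOTE: the package name
// and the value here are illustrative assumptions; the real definition is not
// part of this commit.
const schedulingTimeout = 30 * time.Second

With the constant in one place, every wait in the diff below passes schedulingTimeout instead of a hard-coded 10*time.Second, so a future bump only needs to touch a single line.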
This commit is contained in:
parent 8c9c67c000
commit 47c3b10e8d
1 changed file with 9 additions and 9 deletions
@@ -121,7 +121,7 @@ func testDeviceBindingConditionsBasicFlow(tCtx ktesting.TContext, enabled bool)
     start := time.Now()
     claim1 := createClaim(tCtx, namespace, "-a", class, claim)
     pod := createPod(tCtx, namespace, "-a", podWithClaimName, claim1)
-    claim1 = waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, 10*time.Second)
+    claim1 = waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, schedulingTimeout)
     end := time.Now()
     gomega.NewWithT(tCtx).Expect(claim1).To(gomega.HaveField("Status.Allocation", gstruct.PointTo(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
         "Devices": gomega.Equal(resourceapi.DeviceAllocationResult{
@@ -143,7 +143,7 @@ func testDeviceBindingConditionsBasicFlow(tCtx ktesting.TContext, enabled bool)
     // Second pod should get the device with binding conditions.
     claim2 := createClaim(tCtx, namespace, "-b", class, claim)
     pod = createPod(tCtx, namespace, "-b", podWithClaimName, claim2)
-    claim2 = waitForClaimAllocatedToDevice(tCtx, namespace, claim2.Name, 10*time.Second)
+    claim2 = waitForClaimAllocatedToDevice(tCtx, namespace, claim2.Name, schedulingTimeout)
     end = time.Now()
     gomega.NewWithT(tCtx).Expect(claim2).To(gomega.HaveField("Status.Allocation", gstruct.PointTo(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
         "Devices": gomega.Equal(resourceapi.DeviceAllocationResult{
@@ -193,7 +193,7 @@ func testDeviceBindingConditionsBasicFlow(tCtx ktesting.TContext, enabled bool)
     )

     // allocation restored?
-    claim2 = waitForClaimAllocatedToDevice(tCtx, namespace, claim2.Name, 10*time.Second)
+    claim2 = waitForClaimAllocatedToDevice(tCtx, namespace, claim2.Name, schedulingTimeout)

     // Now it's safe to set the final binding condition.
     // Allow the scheduler to proceed.
@@ -262,7 +262,7 @@ func testDeviceBindingFailureConditionsReschedule(tCtx ktesting.TContext, useTai
     // ensuring the initial allocation occurs on the intended node.
     claim1 := createClaim(tCtx, namespace, "-a", class, claim)
     pod := createPod(tCtx, namespace, "-a", podWithClaimName, claim1)
-    claim1 = waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, 10*time.Second)
+    claim1 = waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, schedulingTimeout)
     gomega.NewWithT(tCtx).Expect(claim1).To(gomega.HaveField("Status.Allocation", gstruct.PointTo(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
         "Devices": gomega.Equal(resourceapi.DeviceAllocationResult{
            Results: []resourceapi.DeviceRequestAllocationResult{{
@@ -364,7 +364,7 @@ func testDeviceBindingFailureConditionsReschedule(tCtx ktesting.TContext, useTai
     )

     // allocation restored?
-    claim1 = waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, 10*time.Second)
+    claim1 = waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, schedulingTimeout)

     gomega.NewWithT(tCtx).Expect(claim1).To(gomega.HaveField("Status.Allocation", gstruct.PointTo(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
         "Devices": gomega.Equal(resourceapi.DeviceAllocationResult{
@@ -424,7 +424,7 @@ profiles:
     pod := createPod(tCtx, namespace, "-timeout-enforced", podWithClaimName, claim1)

     // Wait until the claim is allocated.
-    allocatedClaim := waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, 10*time.Second)
+    allocatedClaim := waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, schedulingTimeout)

     gomega.NewWithT(tCtx).Expect(allocatedClaim).To(gomega.HaveField(
         "Status.Allocation",
@@ -519,7 +519,7 @@ profiles:
     claim1 := createClaim(tCtx, namespace, "-timeout", class, claim)
     pod := createPod(tCtx, namespace, "-timeout", podWithClaimName, claim1)

-    claim1 = waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, 10*time.Second)
+    claim1 = waitForClaimAllocatedToDevice(tCtx, namespace, claim1.Name, schedulingTimeout)
     gomega.NewWithT(tCtx).Expect(claim1).To(gomega.HaveField(
         "Status.Allocation",
         gstruct.PointTo(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
@@ -564,7 +564,7 @@ profiles:
         }
         _, err = tCtx.Client().ResourceV1().ResourceSlices().Get(tCtx, sliceWithoutBinding.Name, metav1.GetOptions{})
         return err
-    }).WithTimeout(10*time.Second).WithPolling(300*time.Millisecond).Should(
+    }).WithTimeout(schedulingTimeout).WithPolling(300*time.Millisecond).Should(
         gomega.Succeed(), "slice must be created before binding timeout")

     // Wait until the binding timeout occurs.
@@ -584,7 +584,7 @@ profiles:
     // Verify recovery to the newly added device without BindingConditions through rescheduling triggered by binding timeout.
     tCtx.Eventually(func(tCtx ktesting.TContext) (*resourceapi.ResourceClaim, error) {
         return tCtx.Client().ResourceV1().ResourceClaims(namespace).Get(tCtx, claim1.Name, metav1.GetOptions{})
-    }).WithTimeout(10*time.Second).WithPolling(1*time.Second).Should(gomega.HaveField(
+    }).WithTimeout(schedulingTimeout).WithPolling(1*time.Second).Should(gomega.HaveField(
         "Status.Allocation",
         gstruct.PointTo(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
             "Devices": gomega.Equal(resourceapi.DeviceAllocationResult{