//go:build linux
// +build linux
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2enode

import (
	"context"
	"fmt"
	"math"
	"sort"
	"strconv"
	"sync"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
	kubeletstatsv1alpha1 "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
	kubemetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
2018-01-31 02:06:32 -05:00
imageutils "k8s.io/kubernetes/test/utils/image"
2022-04-04 08:00:06 -04:00
admissionapi "k8s.io/pod-security-admission/api"
2016-08-03 14:43:24 -04:00
)
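
// kubeletAddr is the kubelet's read-only endpoint on the local node; the test scrapes
// Prometheus metrics from it directly instead of going through the API server proxy.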
const (
	kubeletAddr = "localhost:10255"
)

var _ = SIGDescribe("Density", framework.WithSerial(), framework.WithSlow(), func() {
	const (
		// The data collection time of the resource collector and the standalone cadvisor
		// is not synchronized, so the resource collector may miss data or
		// collect duplicated data.
		containerStatsPollingPeriod = 500 * time.Millisecond
	)

	var (
		rc *ResourceCollector
	)

	f := framework.NewDefaultFramework("density-test")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	ginkgo.BeforeEach(func(ctx context.Context) {
		// Start a standalone cadvisor pod using 'CreateSync'; the pod is running when it returns.
		e2epod.NewPodClient(f).CreateSync(ctx, getCadvisorPod())
		// The resource collector monitors fine-grained CPU/memory usage via the standalone
		// cadvisor with a 1s housekeeping interval.
		rc = NewResourceCollector(containerStatsPollingPeriod)
	})

	ginkgo.Context("create a batch of pods", func() {
		// TODO(coufon): the values are generous; set more precise limits with benchmark data
		// and add more tests.
		dTests := []densityTest{
			{
				podsNr:   10,
				interval: 0 * time.Millisecond,
				cpuLimits: e2ekubelet.ContainersCPUSummary{
					kubeletstatsv1alpha1.SystemContainerKubelet: {0.50: 0.30, 0.95: 0.50},
					kubeletstatsv1alpha1.SystemContainerRuntime: {0.50: 0.40, 0.95: 0.60},
				},
				memLimits: e2ekubelet.ResourceUsagePerContainer{
					kubeletstatsv1alpha1.SystemContainerKubelet: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024},
					kubeletstatsv1alpha1.SystemContainerRuntime: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 500 * 1024 * 1024},
				},
				// percentile limits of single pod startup latency
				podStartupLimits: e2emetrics.LatencyMetric{
					Perc50: 16 * time.Second,
					Perc90: 18 * time.Second,
					Perc99: 20 * time.Second,
				},
				// upper bound of the startup latency of a batch of pods
				podBatchStartupLimit: 25 * time.Second,
			},
		}

		for _, testArg := range dTests {
			itArg := testArg
			desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval", itArg.podsNr, itArg.interval)
			ginkgo.It(desc, func(ctx context.Context) {
				itArg.createMethod = "batch"
				testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)

				batchLag, e2eLags := runDensityBatchTest(ctx, f, rc, itArg, testInfo, false)

				ginkgo.By("Verifying latency")
				logAndVerifyLatency(ctx, batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, true)

				ginkgo.By("Verifying resource")
				logAndVerifyResource(ctx, f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true)
			})
		}
	})

	ginkgo.Context("create a batch of pods", func() {
		dTests := []densityTest{
			{
				podsNr:   10,
				interval: 0 * time.Millisecond,
			},
			{
				podsNr:   35,
				interval: 0 * time.Millisecond,
			},
			{
				podsNr:   90,
				interval: 0 * time.Millisecond,
			},
			{
				podsNr:   10,
				interval: 100 * time.Millisecond,
			},
			{
				podsNr:   35,
				interval: 100 * time.Millisecond,
			},
			{
				podsNr:   90,
				interval: 100 * time.Millisecond,
			},
			{
				podsNr:   10,
				interval: 300 * time.Millisecond,
			},
			{
				podsNr:   35,
				interval: 300 * time.Millisecond,
			},
			{
				podsNr:   90,
				interval: 300 * time.Millisecond,
			},
		}

		for _, testArg := range dTests {
			itArg := testArg
			desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval [Benchmark][NodeSpecialFeature:Benchmark]", itArg.podsNr, itArg.interval)
			ginkgo.It(desc, func(ctx context.Context) {
				itArg.createMethod = "batch"
				testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)

				batchLag, e2eLags := runDensityBatchTest(ctx, f, rc, itArg, testInfo, true)

				ginkgo.By("Verifying latency")
				logAndVerifyLatency(ctx, batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)

				ginkgo.By("Verifying resource")
				logAndVerifyResource(ctx, f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
			})
		}
	})

	ginkgo.Context("create a batch of pods with higher API QPS", func() {
		dTests := []densityTest{
			{
				podsNr:      90,
				interval:    0 * time.Millisecond,
				APIQPSLimit: 60,
			},
			{
				podsNr:      90,
				interval:    100 * time.Millisecond,
				APIQPSLimit: 60,
			},
			{
				podsNr:      90,
				interval:    300 * time.Millisecond,
				APIQPSLimit: 60,
			},
		}

		for _, testArg := range dTests {
			itArg := testArg
			ginkgo.Context("", func() {
				desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark][NodeSpecialFeature:Benchmark]", itArg.podsNr, itArg.interval, itArg.APIQPSLimit)
				// The latency caused by the API QPS limit takes a large portion (up to ~33%) of e2e latency.
				// It causes the kubelet's pod startup latency (and creation throughput) to be underestimated.
				// Here we raise the API QPS limit from the default 5 to 60 in order to test real kubelet
				// performance. Note that it will cause higher resource usage.
				tempSetCurrentKubeletConfig(f, func(ctx context.Context, cfg *kubeletconfig.KubeletConfiguration) {
					framework.Logf("Old QPS limit is: %d", cfg.KubeAPIQPS)
					// Set the new API QPS limit
					cfg.KubeAPIQPS = int32(itArg.APIQPSLimit)
				})
				ginkgo.It(desc, func(ctx context.Context) {
					itArg.createMethod = "batch"
					testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
					batchLag, e2eLags := runDensityBatchTest(ctx, f, rc, itArg, testInfo, true)

					ginkgo.By("Verifying latency")
					logAndVerifyLatency(ctx, batchLag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)

					ginkgo.By("Verifying resource")
					logAndVerifyResource(ctx, f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
				})
			})
		}
	})

	ginkgo.Context("create a sequence of pods", func() {
		dTests := []densityTest{
			{
				podsNr:   10,
				bgPodsNr: 50,
				cpuLimits: e2ekubelet.ContainersCPUSummary{
					kubeletstatsv1alpha1.SystemContainerKubelet: {0.50: 0.30, 0.95: 0.50},
					kubeletstatsv1alpha1.SystemContainerRuntime: {0.50: 0.40, 0.95: 0.60},
				},
				memLimits: e2ekubelet.ResourceUsagePerContainer{
					kubeletstatsv1alpha1.SystemContainerKubelet: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024},
					kubeletstatsv1alpha1.SystemContainerRuntime: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 500 * 1024 * 1024},
				},
				podStartupLimits: e2emetrics.LatencyMetric{
					Perc50: 5000 * time.Millisecond,
					Perc90: 9000 * time.Millisecond,
					Perc99: 10000 * time.Millisecond,
				},
			},
		}

		for _, testArg := range dTests {
			itArg := testArg
			desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods", itArg.podsNr, itArg.bgPodsNr)
			ginkgo.It(desc, func(ctx context.Context) {
				itArg.createMethod = "sequence"
				testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
				batchlag, e2eLags := runDensitySeqTest(ctx, f, rc, itArg, testInfo)

				ginkgo.By("Verifying latency")
				logAndVerifyLatency(ctx, batchlag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, true)

				ginkgo.By("Verifying resource")
				logAndVerifyResource(ctx, f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, true)
			})
		}
	})

	ginkgo.Context("create a sequence of pods", func() {
		dTests := []densityTest{
			{
				podsNr:   10,
				bgPodsNr: 50,
			},
			{
				podsNr:   30,
				bgPodsNr: 50,
			},
			{
				podsNr:   50,
				bgPodsNr: 50,
			},
		}

		for _, testArg := range dTests {
			itArg := testArg
			desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %d background pods [Benchmark][NodeSpecialFeature:Benchmark]", itArg.podsNr, itArg.bgPodsNr)
			ginkgo.It(desc, func(ctx context.Context) {
				itArg.createMethod = "sequence"
				testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)
				batchlag, e2eLags := runDensitySeqTest(ctx, f, rc, itArg, testInfo)

				ginkgo.By("Verifying latency")
				logAndVerifyLatency(ctx, batchlag, e2eLags, itArg.podStartupLimits, itArg.podBatchStartupLimit, testInfo, false)

				ginkgo.By("Verifying resource")
				logAndVerifyResource(ctx, f, rc, itArg.cpuLimits, itArg.memLimits, testInfo, false)
			})
		}
	})
})
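
// densityTest describes a single density test case: how many pods to create and how (batch or
// sequence, creation interval, API QPS), plus the resource and latency limits to verify against.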
type densityTest struct {
	// number of pods
	podsNr int
	// number of background pods
	bgPodsNr int
	// interval between creating pods (rate control)
	interval time.Duration
	// create pods in 'batch' or 'sequence'
	createMethod string
	// API QPS limit
	APIQPSLimit int
	// performance limits
	cpuLimits            e2ekubelet.ContainersCPUSummary
	memLimits            e2ekubelet.ResourceUsagePerContainer
	podStartupLimits     e2emetrics.LatencyMetric
	podBatchStartupLimit time.Duration
}
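
// getTestName encodes the test parameters into a stable identifier used as a perf-data key.
// For example, a batch test with 10 pods, no background pods, no creation interval and the
// default API QPS yields "density_create_batch_10_0_0_5".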
func (dt *densityTest) getTestName() string {
	// The current default API QPS limit is 5
	// TODO(coufon): is there any way to not hard code this?
	APIQPSLimit := 5
	if dt.APIQPSLimit > 0 {
		APIQPSLimit = dt.APIQPSLimit
	}
	return fmt.Sprintf("density_create_%s_%d_%d_%d_%d", dt.createMethod, dt.podsNr, dt.bgPodsNr,
		dt.interval.Nanoseconds()/1000000, APIQPSLimit)
}

// runDensityBatchTest runs the density batch pod creation test
func runDensityBatchTest(ctx context.Context, f *framework.Framework, rc *ResourceCollector, testArg densityTest, testInfo map[string]string,
	isLogTimeSeries bool) (time.Duration, []e2emetrics.PodLatencyData) {
	const (
		podType               = "density_test_pod"
		sleepBeforeCreatePods = 30 * time.Second
	)
	var (
		mutex      = &sync.Mutex{}
		watchTimes = make(map[string]metav1.Time)
		stopCh     = make(chan struct{})
	)

	// create test pod data structure
	pods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageName(), podType)

	// the controller watches the change of pod status
	controller := newInformerWatchPod(ctx, f, mutex, watchTimes, podType)
	go controller.Run(stopCh)
	defer close(stopCh)

	// TODO(coufon): in the test we found that the kubelet starts while it is busy with something,
	// and as a result 'syncLoop' does not respond to pod creation immediately. Creating the first
	// pod has a delay of around 5s. The node status is already 'ready', so waiting for the node
	// to become ready does not help here. For now, wait for a grace period to let 'syncLoop'
	// become ready.
	time.Sleep(sleepBeforeCreatePods)

	rc.Start()

	ginkgo.By("Creating a batch of pods")
	// It returns a map['pod name']'creation time' containing the creation timestamps
	createTimes := createBatchPodWithRateControl(ctx, f, pods, testArg.interval)

	ginkgo.By("Waiting for all Pods to be observed by the watch...")

	gomega.Eventually(ctx, func() bool {
		return len(watchTimes) == testArg.podsNr
	}, 10*time.Minute, 10*time.Second).Should(gomega.BeTrue())

	if len(watchTimes) < testArg.podsNr {
		framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.")
	}

	// Analyze results
	var (
		firstCreate metav1.Time
		lastRunning metav1.Time
		init        = true
		e2eLags     = make([]e2emetrics.PodLatencyData, 0)
	)
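
	// The per-pod e2e startup latency is the watch-observed Running time minus the client-side
	// creation time; firstCreate and lastRunning bound the whole batch.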
	for name, create := range createTimes {
		gomega.Expect(watchTimes).To(gomega.HaveKey(name))
		watch := watchTimes[name]
		e2eLags = append(e2eLags,
			e2emetrics.PodLatencyData{Name: name, Latency: watch.Time.Sub(create.Time)})
		if !init {
			if firstCreate.Time.After(create.Time) {
				firstCreate = create
			}
			if lastRunning.Time.Before(watch.Time) {
				lastRunning = watch
			}
		} else {
			init = false
			firstCreate, lastRunning = create, watch
		}
	}

	sort.Sort(e2emetrics.LatencySlice(e2eLags))
	batchLag := lastRunning.Time.Sub(firstCreate.Time)

	rc.Stop()
	deletePodsSync(ctx, f, pods)

	// Log time series data.
	if isLogTimeSeries {
		logDensityTimeSeries(rc, createTimes, watchTimes, testInfo)
	}
	// Log throughput data.
	logPodCreateThroughput(batchLag, e2eLags, testArg.podsNr, testInfo)

	deletePodsSync(ctx, f, []*v1.Pod{getCadvisorPod()})

	return batchLag, e2eLags
}

// runDensitySeqTest runs the density sequential pod creation test
func runDensitySeqTest(ctx context.Context, f *framework.Framework, rc *ResourceCollector, testArg densityTest, testInfo map[string]string) (time.Duration, []e2emetrics.PodLatencyData) {
	const (
		podType               = "density_test_pod"
		sleepBeforeCreatePods = 30 * time.Second
	)
	bgPods := newTestPods(testArg.bgPodsNr, true, imageutils.GetPauseImageName(), "background_pod")
	testPods := newTestPods(testArg.podsNr, true, imageutils.GetPauseImageName(), podType)

	ginkgo.By("Creating a batch of background pods")

	// CreateBatch is synchronized; all pods are running when it returns
	e2epod.NewPodClient(f).CreateBatch(ctx, bgPods)

	time.Sleep(sleepBeforeCreatePods)

	rc.Start()

	// Create pods sequentially (back-to-back). e2eLags have been sorted.
	batchlag, e2eLags := createBatchPodSequential(ctx, f, testPods, podType)

	rc.Stop()
	deletePodsSync(ctx, f, append(bgPods, testPods...))

	// Log throughput data.
	logPodCreateThroughput(batchlag, e2eLags, testArg.podsNr, testInfo)

	deletePodsSync(ctx, f, []*v1.Pod{getCadvisorPod()})

	return batchlag, e2eLags
}

// createBatchPodWithRateControl creates a batch of pods concurrently, using one goroutine per
// creation; between creations it sleeps for an interval to control throughput.
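// The returned map records only the client-side creation timestamps; whether and when each pod
// actually reaches Running is observed separately by the pod informer.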
func createBatchPodWithRateControl(ctx context.Context, f *framework.Framework, pods []*v1.Pod, interval time.Duration) map[string]metav1.Time {
	createTimes := make(map[string]metav1.Time)
	for i := range pods {
		pod := pods[i]
		createTimes[pod.ObjectMeta.Name] = metav1.Now()
		go e2epod.NewPodClient(f).Create(ctx, pod)
		time.Sleep(interval)
	}
	return createTimes
}

// getPodStartLatency gets the Prometheus metric 'pod start latency' from the kubelet
func getPodStartLatency(ctx context.Context, node string) (e2emetrics.KubeletLatencyMetrics, error) {
	latencyMetrics := e2emetrics.KubeletLatencyMetrics{}
	ms, err := e2emetrics.GrabKubeletMetricsWithoutProxy(ctx, node, "/metrics")
	framework.ExpectNoError(err, "Failed to get kubelet metrics without proxy in node %s", node)

	for _, samples := range ms {
		for _, sample := range samples {
			if sample.Metric["__name__"] == kubemetrics.KubeletSubsystem+"_"+kubemetrics.PodStartDurationKey {
				quantile, _ := strconv.ParseFloat(string(sample.Metric["quantile"]), 64)
				latencyMetrics = append(latencyMetrics,
					e2emetrics.KubeletLatencyMetric{
						Quantile: quantile,
						Method:   kubemetrics.PodStartDurationKey,
						Latency:  time.Duration(int(sample.Value)) * time.Microsecond})
			}
		}
	}
	return latencyMetrics, nil
}

// newInformerWatchPod creates an informer to check whether all pods are running.
func newInformerWatchPod(ctx context.Context, f *framework.Framework, mutex *sync.Mutex, watchTimes map[string]metav1.Time, podType string) cache.Controller {
	ns := f.Namespace.Name
	checkPodRunning := func(p *v1.Pod) {
		mutex.Lock()
		defer mutex.Unlock()
		defer ginkgo.GinkgoRecover()

		if p.Status.Phase == v1.PodRunning {
			if _, found := watchTimes[p.Name]; !found {
				watchTimes[p.Name] = metav1.Now()
			}
		}
	}
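
	// List and watch only pods carrying the matching 'type' label, and record the first time
	// each pod is observed Running. The handlers fan out to goroutines so the informer loop
	// is not blocked.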
	_, controller := cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
				obj, err := f.ClientSet.CoreV1().Pods(ns).List(ctx, options)
				return runtime.Object(obj), err
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": podType}).String()
				return f.ClientSet.CoreV1().Pods(ns).Watch(ctx, options)
			},
		},
		&v1.Pod{},
		0,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				p, ok := obj.(*v1.Pod)
				if !ok {
					framework.Failf("Failed to cast object %T to Pod", obj)
				}
				go checkPodRunning(p)
			},
			UpdateFunc: func(oldObj, newObj interface{}) {
				p, ok := newObj.(*v1.Pod)
				if !ok {
					framework.Failf("Failed to cast object %T to Pod", newObj)
				}
				go checkPodRunning(p)
			},
		},
	)
	return controller
}

// createBatchPodSequential creates pods back-to-back in sequence.
func createBatchPodSequential(ctx context.Context, f *framework.Framework, pods []*v1.Pod, podType string) (time.Duration, []e2emetrics.PodLatencyData) {
	var (
		mutex       = &sync.Mutex{}
		watchTimes  = make(map[string]metav1.Time)
		stopCh      = make(chan struct{})
		firstCreate metav1.Time
		lastRunning metav1.Time
		init        = true
	)
	// the controller watches the change of pod status
	controller := newInformerWatchPod(ctx, f, mutex, watchTimes, podType)
	go controller.Run(stopCh)
	defer close(stopCh)

	batchStartTime := metav1.Now()
	e2eLags := make([]e2emetrics.PodLatencyData, 0)
	createTimes := make(map[string]metav1.Time)
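
	// Create each pod, then block until the informer records it Running, so that the next
	// creation starts only after the previous pod is up: creation is strictly back-to-back.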
	for _, pod := range pods {
		create := metav1.Now()
		createTimes[pod.Name] = create
		p := e2epod.NewPodClient(f).Create(ctx, pod)
		framework.ExpectNoError(wait.PollUntilContextTimeout(ctx, 2*time.Second, framework.PodStartTimeout, true, podWatchedRunning(watchTimes, p.Name)))
		e2eLags = append(e2eLags,
			e2emetrics.PodLatencyData{Name: pod.Name, Latency: watchTimes[pod.Name].Time.Sub(create.Time)})
	}

	for name, create := range createTimes {
		gomega.Expect(watchTimes).To(gomega.HaveKey(name))
		watch := watchTimes[name]
		if !init {
			if firstCreate.Time.After(create.Time) {
				firstCreate = create
			}
			if lastRunning.Time.Before(watch.Time) {
				lastRunning = watch
			}
		} else {
			init = false
			firstCreate, lastRunning = create, watch
		}
	}
	batchLag := lastRunning.Time.Sub(batchStartTime.Time)
	sort.Sort(e2emetrics.LatencySlice(e2eLags))
	return batchLag, e2eLags
}

// podWatchedRunning reports whether the pod has been observed Running, i.e. whether the
// informer has recorded a watch time for it.
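// Note: watchTimes is written by the informer goroutine under a mutex; the unguarded read below
// is assumed tolerable for this polling check, since a missed update is simply retried.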
func podWatchedRunning(watchTimes map[string]metav1.Time, podName string) wait.ConditionWithContextFunc {
	return func(ctx context.Context) (done bool, err error) {
		if _, found := watchTimes[podName]; found {
			return true, nil
		}
		return false, nil
	}
}

// verifyLatencyWithinThreshold verifies whether the 50th, 90th and 99th percentiles of a
// latency metric are within the expected threshold.
func verifyLatencyWithinThreshold(threshold, actual e2emetrics.LatencyMetric, metricName string) error {
	if actual.Perc50 > threshold.Perc50 {
		return fmt.Errorf("too high %v latency 50th percentile: %v", metricName, actual.Perc50)
	}
	if actual.Perc90 > threshold.Perc90 {
		return fmt.Errorf("too high %v latency 90th percentile: %v", metricName, actual.Perc90)
	}
	if actual.Perc99 > threshold.Perc99 {
		return fmt.Errorf("too high %v latency 99th percentile: %v", metricName, actual.Perc99)
	}
	return nil
}

// extractLatencyMetrics returns the latency metrics for each percentile (50th, 90th and 99th).
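// With the input sorted ascending, the p-th percentile is element ceil(len*p/100)-1: with 10
// samples, the 50th percentile is index 4, the 90th index 8 and the 99th index 9.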
func extractLatencyMetrics(latencies []e2emetrics.PodLatencyData) e2emetrics.LatencyMetric {
	length := len(latencies)
	perc50 := latencies[int(math.Ceil(float64(length*50)/100))-1].Latency
	perc90 := latencies[int(math.Ceil(float64(length*90)/100))-1].Latency
	perc99 := latencies[int(math.Ceil(float64(length*99)/100))-1].Latency
	perc100 := latencies[length-1].Latency
	return e2emetrics.LatencyMetric{Perc50: perc50, Perc90: perc90, Perc99: perc99, Perc100: perc100}
}

// printLatencies outputs latencies to the log in a readable format.
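// The tail slice latencies[(len*9)/10:] holds the slowest ~10% of pods, since the input is
// sorted ascending.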
func printLatencies(latencies []e2emetrics.PodLatencyData, header string) {
	metrics := extractLatencyMetrics(latencies)
	framework.Logf("10%% %s: %v", header, latencies[(len(latencies)*9)/10:])
	framework.Logf("perc50: %v, perc90: %v, perc99: %v", metrics.Perc50, metrics.Perc90, metrics.Perc99)
}

// logAndVerifyLatency verifies whether the pod creation latency satisfies the limit.
func logAndVerifyLatency(ctx context.Context, batchLag time.Duration, e2eLags []e2emetrics.PodLatencyData, podStartupLimits e2emetrics.LatencyMetric,
	podBatchStartupLimit time.Duration, testInfo map[string]string, isVerify bool) {
	printLatencies(e2eLags, "worst client e2e total latencies")

	// TODO(coufon): do not trust 'kubelet' metrics since they are not reset!
	latencyMetrics, _ := getPodStartLatency(ctx, kubeletAddr)
	framework.Logf("Kubelet Prometheus metrics (not reset):\n%s", framework.PrettyPrintJSON(latencyMetrics))

	podStartupLatency := extractLatencyMetrics(e2eLags)

	// log latency perf data
	logPerfData(getLatencyPerfData(podStartupLatency, testInfo), "latency")

	if isVerify {
		// check whether e2e pod startup time is acceptable.
		framework.ExpectNoError(verifyLatencyWithinThreshold(podStartupLimits, podStartupLatency, "pod startup"))

		// check batch pod creation latency
		if podBatchStartupLimit > 0 {
			if batchLag > podBatchStartupLimit {
				framework.Failf("Batch creation startup time %v exceeds limit %v", batchLag, podBatchStartupLimit)
			}
		}
	}
}

// logPodCreateThroughput calculates and logs the pod creation throughput.
func logPodCreateThroughput(batchLag time.Duration, e2eLags []e2emetrics.PodLatencyData, podsNr int, testInfo map[string]string) {
	logPerfData(getThroughputPerfData(batchLag, e2eLags, podsNr, testInfo), "throughput")
}