mirror of https://github.com/kubernetes/kubernetes.git
ktesting: replace Begin/End with TContext.Step
Manually pairing Begin with End is too error prone to be useful. It had the advantage of keeping variables created between them visible to the following code, but that doesn't justify using those calls.

By using a callback we can achieve a few things:

- Code using it automatically shadows the parent tCtx, thus enforcing that within a code block the tCtx with the step is used consistently.
- The code block is clearly delineated with curly braces.
- When the code block ends, the unmodified parent tCtx is automatically in scope again.

Downsides:

- Extra boilerplate for the anonymous function. Python's `with tCtx.Step(...) as tCtx:` would be nicer. As an approximation of that, `for tCtx := range tCtx.Step(...)` was tried with `Step` returning an iterator, but that wasn't very idiomatic.
- Variables created inside the code block are not visible outside of it.
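For illustration, a condensed before/after of the pattern (distilled from the first hunk below; error messages shortened):

    // Before: Begin and End must be paired manually. Forgetting End, or
    // returning early, leaves tCtx with the step still attached.
    tCtx = ktesting.Begin(tCtx, "get source code version")
    gitVersion, _, err := sourceVersion(tCtx, repoRoot)
    tCtx.ExpectNoError(err, "determine source code version")
    tCtx = ktesting.End(tCtx)

    // After: the callback parameter shadows the parent tCtx, so the stepped
    // context is used consistently inside the block, and the unmodified
    // parent is back in scope as soon as the callback returns.
    var gitVersion string
    tCtx.Step("get source code version", func(tCtx ktesting.TContext) {
        var err error
        gitVersion, _, err = sourceVersion(tCtx, repoRoot)
        tCtx.ExpectNoError(err, "determine source code version")
    })

This also shows the second downside: gitVersion has to be declared before the callback to remain visible afterwards.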
parent d17aaf5e29
commit 047682908d
3 changed files with 93 additions and 115 deletions
@@ -123,28 +123,32 @@ func testUpgradeDowngrade(tCtx ktesting.TContext) {
 	}
 
 	// Determine what we need to downgrade to.
-	tCtx = ktesting.Begin(tCtx, "get source code version")
-	gitVersion, _, err := sourceVersion(tCtx, repoRoot)
-	tCtx.ExpectNoError(err, "determine source code version for repo root %q", repoRoot)
-	version, err := version.ParseGeneric(gitVersion)
-	tCtx.ExpectNoError(err, "parse version %s of repo root %q", gitVersion, repoRoot)
-	major, previousMinor := version.Major(), version.Minor()-1
-	if strings.Contains(gitVersion, "-alpha.0") {
-		// All version up to and including x.y.z-alpha.0 are treated as if we were
-		// still the previous minor version x.(y-1). There are two reason for this:
-		//
-		// - During code freeze around (at?) -rc.0, the master branch already
-		//   identfies itself as the next release with -alpha.0. Without this
-		//   special case, we would change the version skew testing from what
-		//   has been tested and been known to work to something else, which
-		//   can and at least once did break.
-		//
-		// - Early in the next cycle the differences compared to the previous
-		//   release are small, so it's more interesting to go back further.
-		previousMinor--
-	}
-	tCtx.Logf("got version: major: %d, minor: %d, previous minor: %d", major, version.Minor(), previousMinor)
-	tCtx = ktesting.End(tCtx)
+	var major, previousMinor uint
+	var gitVersion string
+
+	tCtx.Step("get source code version", func(tCtx ktesting.TContext) {
+		var err error
+		gitVersion, _, err = sourceVersion(tCtx, repoRoot)
+		tCtx.ExpectNoError(err, "determine source code version for repo root %q", repoRoot)
+		version, err := version.ParseGeneric(gitVersion)
+		tCtx.ExpectNoError(err, "parse version %s of repo root %q", gitVersion, repoRoot)
+		major, previousMinor = version.Major(), version.Minor()-1
+		if strings.Contains(gitVersion, "-alpha.0") {
+			// All version up to and including x.y.z-alpha.0 are treated as if we were
+			// still the previous minor version x.(y-1). There are two reason for this:
+			//
+			// - During code freeze around (at?) -rc.0, the master branch already
+			//   identfies itself as the next release with -alpha.0. Without this
+			//   special case, we would change the version skew testing from what
+			//   has been tested and been known to work to something else, which
+			//   can and at least once did break.
+			//
+			// - Early in the next cycle the differences compared to the previous
+			//   release are small, so it's more interesting to go back further.
+			previousMinor--
+		}
+		tCtx.Logf("got version: major: %d, minor: %d, previous minor: %d", major, version.Minor(), previousMinor)
+	})
 
 	// KUBERNETES_SERVER_CACHE_DIR can be set to keep downloaded files across test restarts.
 	binDir, cacheBinaries := os.LookupEnv("KUBERNETES_SERVER_CACHE_DIR")
@@ -154,16 +158,18 @@ func testUpgradeDowngrade(tCtx ktesting.TContext) {
 	haveBinaries := false
 
 	// Get the previous release.
-	tCtx = ktesting.Begin(tCtx, "get previous release info")
-	tCtx.Logf("stable release %d.%d", major, previousMinor)
-	previousURL, previousVersion, err := serverDownloadURL(tCtx, "stable", major, previousMinor)
-	if errors.Is(err, errHTTP404) {
-		tCtx.Logf("stable doesn't exist, get latest release %d.%d", major, previousMinor)
-		previousURL, previousVersion, err = serverDownloadURL(tCtx, "latest", major, previousMinor)
-	}
-	tCtx.ExpectNoError(err)
-	tCtx.Logf("got previous release version: %s, URL: %s", previousVersion, previousURL)
-	tCtx = ktesting.End(tCtx)
+	var previousURL, previousVersion string
+	tCtx.Step("get previous release info", func(tCtx ktesting.TContext) {
+		tCtx.Logf("stable release %d.%d", major, previousMinor)
+		var err error
+		previousURL, previousVersion, err = serverDownloadURL(tCtx, "stable", major, previousMinor)
+		if errors.Is(err, errHTTP404) {
+			tCtx.Logf("stable doesn't exist, get latest release %d.%d", major, previousMinor)
+			previousURL, previousVersion, err = serverDownloadURL(tCtx, "latest", major, previousMinor)
+		}
+		tCtx.ExpectNoError(err)
+		tCtx.Logf("got previous release version: %s, URL: %s", previousVersion, previousURL)
+	})
 
 	if cacheBinaries {
 		binDir = path.Join(binDir, previousVersion)
@@ -173,51 +179,53 @@ func testUpgradeDowngrade(tCtx ktesting.TContext) {
 		}
 	}
 	if !haveBinaries {
-		tCtx = ktesting.Begin(tCtx, fmt.Sprintf("download and unpack %s", previousURL))
-		req, err := http.NewRequestWithContext(tCtx, http.MethodGet, previousURL, nil)
-		tCtx.ExpectNoError(err, "construct request")
-		response, err := http.DefaultClient.Do(req)
-		tCtx.ExpectNoError(err, "download")
-		defer func() {
-			_ = response.Body.Close()
-		}()
-		decompress, err := gzip.NewReader(response.Body)
-		tCtx.ExpectNoError(err, "construct gzip reader")
-		unpack := tar.NewReader(decompress)
-		for {
-			header, err := unpack.Next()
-			if err == io.EOF {
-				break
-			}
-			base := path.Base(header.Name)
-			if slices.Contains(localupcluster.KubeClusterComponents, localupcluster.KubeComponentName(base)) {
-				data, err := io.ReadAll(unpack)
-				tCtx.ExpectNoError(err, fmt.Sprintf("read content of %s", header.Name))
-				tCtx.ExpectNoError(os.MkdirAll(binDir, 0755), "create directory for binaries")
-				tCtx.ExpectNoError(os.WriteFile(path.Join(binDir, base), data, 0555), fmt.Sprintf("write content of %s", header.Name))
-			}
-		}
-		tCtx = ktesting.End(tCtx)
+		tCtx.Step(fmt.Sprintf("download and unpack %s", previousURL), func(tCtx ktesting.TContext) {
+			req, err := http.NewRequestWithContext(tCtx, http.MethodGet, previousURL, nil)
+			tCtx.ExpectNoError(err, "construct request")
+			response, err := http.DefaultClient.Do(req)
+			tCtx.ExpectNoError(err, "download")
+			defer func() {
+				_ = response.Body.Close()
+			}()
+			decompress, err := gzip.NewReader(response.Body)
+			tCtx.ExpectNoError(err, "construct gzip reader")
+			unpack := tar.NewReader(decompress)
+			for {
+				header, err := unpack.Next()
+				if err == io.EOF {
+					break
+				}
+				base := path.Base(header.Name)
+				if slices.Contains(localupcluster.KubeClusterComponents, localupcluster.KubeComponentName(base)) {
+					data, err := io.ReadAll(unpack)
+					tCtx.ExpectNoError(err, fmt.Sprintf("read content of %s", header.Name))
+					tCtx.ExpectNoError(os.MkdirAll(binDir, 0755), "create directory for binaries")
+					tCtx.ExpectNoError(os.WriteFile(path.Join(binDir, base), data, 0555), fmt.Sprintf("write content of %s", header.Name))
+				}
+			}
+		})
 	}
 
-	tCtx = ktesting.Begin(tCtx, fmt.Sprintf("bring up v%d.%d", major, previousMinor))
-	cluster := localupcluster.New(tCtx)
-	localUpClusterEnv := map[string]string{
-		"RUNTIME_CONFIG": "resource.k8s.io/v1beta1,resource.k8s.io/v1beta2",
-		"FEATURE_GATES":  "DynamicResourceAllocation=true",
-		// *not* needed because driver will run in "local filesystem" mode (= driver.IsLocal): "ALLOW_PRIVILEGED": "1",
-	}
-	cluster.Start(tCtx, binDir, localUpClusterEnv)
-	tCtx = ktesting.End(tCtx)
+	var cluster *localupcluster.Cluster
+	tCtx.Step(fmt.Sprintf("bring up v%d.%d", major, previousMinor), func(tCtx ktesting.TContext) {
+		cluster = localupcluster.New(tCtx)
+		localUpClusterEnv := map[string]string{
+			"RUNTIME_CONFIG": "resource.k8s.io/v1beta1,resource.k8s.io/v1beta2",
+			"FEATURE_GATES":  "DynamicResourceAllocation=true",
+			// *not* needed because driver will run in "local filesystem" mode (= driver.IsLocal): "ALLOW_PRIVILEGED": "1",
+		}
+		cluster.Start(tCtx, binDir, localUpClusterEnv)
+	})
 
 	restConfig := cluster.LoadConfig(tCtx)
 	restConfig.UserAgent = fmt.Sprintf("%s -- dra", restclient.DefaultKubernetesUserAgent())
 	tCtx = tCtx.WithRESTConfig(restConfig).WithNamespace("default")
 
-	tCtx = ktesting.Begin(tCtx, fmt.Sprintf("v%d.%d", major, previousMinor))
-
-	tCtx.ExpectNoError(e2enode.WaitForAllNodesSchedulable(tCtx, tCtx.Client(), 5*time.Minute), "wait for all nodes to be schedulable")
-	nodes := drautils.NewNodesNow(tCtx, 1, 1)
+	var nodes *drautils.Nodes
+	tCtx.Step(fmt.Sprintf("v%d.%d", major, previousMinor), func(tCtx ktesting.TContext) {
+		tCtx.ExpectNoError(e2enode.WaitForAllNodesSchedulable(tCtx, tCtx.Client(), 5*time.Minute), "wait for all nodes to be schedulable")
+		nodes = drautils.NewNodesNow(tCtx, 1, 1)
+	})
 
 	// Opening sockets locally avoids intermittent errors and delays caused by proxying through the restarted apiserver.
 	// We could speed up testing by shortening the sync delay in the ResourceSlice controller, but let's better
@@ -228,8 +236,6 @@ func testUpgradeDowngrade(tCtx ktesting.TContext) {
 	b := drautils.NewBuilderNow(tCtx, driver)
 	b.SkipCleanup = true
 
-	tCtx = ktesting.End(tCtx)
-
 	upgradedTestFuncs := make(map[string]upgradedTestFunc, len(subTests))
 	tCtx.Run("after-cluster-creation", func(tCtx ktesting.TContext) {
 		for subTest, f := range subTests {
@@ -242,18 +248,14 @@ func testUpgradeDowngrade(tCtx ktesting.TContext) {
 		}
 	})
 
-	tCtx = ktesting.Begin(tCtx, fmt.Sprintf("update to %s", gitVersion))
 	// We could split this up into first updating the apiserver, then control plane components, then restarting kubelet.
 	// For the purpose of this test here we we primarily care about full before/after comparisons, so not done yet.
 	// TODO
-	restoreOptions := cluster.Modify(tCtx, localupcluster.ModifyOptions{Upgrade: true, BinDir: dir})
-	tCtx = ktesting.End(tCtx)
+	restoreOptions := cluster.Modify(tCtx.WithStep(fmt.Sprintf("update to %s", gitVersion)), localupcluster.ModifyOptions{Upgrade: true, BinDir: dir})
 
 	// The kubelet wipes all ResourceSlices on a restart because it doesn't know which drivers were running.
 	// Wait for the ResourceSlice controller in the driver to notice and recreate the ResourceSlices.
-	tCtx = ktesting.Begin(tCtx, "wait for ResourceSlices")
-	ktesting.Eventually(tCtx, driver.NewGetSlices()).WithTimeout(5 * time.Minute).Should(gomega.HaveField("Items", gomega.HaveLen(len(nodes.NodeNames))))
-	tCtx = ktesting.End(tCtx)
+	ktesting.Eventually(tCtx.WithStep("wait for ResourceSlices"), driver.NewGetSlices()).WithTimeout(5 * time.Minute).Should(gomega.HaveField("Items", gomega.HaveLen(len(nodes.NodeNames))))
 
 	downgradedTestFuncs := make(map[string]downgradedTestFunc, len(subTests))
 	tCtx.Run("after-cluster-upgrade", func(tCtx ktesting.TContext) {
@@ -265,19 +267,15 @@ func testUpgradeDowngrade(tCtx ktesting.TContext) {
 	})
 
 	// Roll back.
-	tCtx = ktesting.Begin(tCtx, "downgrade")
-	cluster.Modify(tCtx, restoreOptions)
-	tCtx = ktesting.End(tCtx)
+	cluster.Modify(tCtx.WithStep("downgrade"), restoreOptions)
 
 	// TODO: ensure that kube-controller-manager is up-and-running.
 	// This works around https://github.com/kubernetes/kubernetes/issues/132334 and can be removed
 	// once a fix for that is backported.
-	tCtx = ktesting.Begin(tCtx, "wait for kube-controller-manager")
-	ktesting.Eventually(tCtx, func(tCtx ktesting.TContext) string {
+	ktesting.Eventually(tCtx.WithStep("wait for kube-controller-manager"), func(tCtx ktesting.TContext) string {
 		output, _ := cluster.GetSystemLogs(tCtx, localupcluster.KubeControllerManager)
 		return output
 	}).Should(gomega.ContainSubstring(`"Caches are synced" controller="resource_claim"`))
-	tCtx = ktesting.End(tCtx)
 
 	tCtx.Run("after-cluster-downgrade", func(tCtx ktesting.TContext) {
 		for subTest, f := range downgradedTestFuncs {
@@ -16,8 +16,6 @@ limitations under the License.
 
 package ktesting
 
-import "strings"
-
 // Deprecated: use tCtx.WithStep instead
 func WithStep(tCtx TContext, step string) TContext {
 	return tCtx.WithStep(step)
@@ -39,6 +37,12 @@ func (tc *TC) WithStep(step string) *TC {
 	return tc
 }
 
+// Deprecated: use tCtx.Step instead
+func Step(tCtx TContext, step string, cb func(tCtx TContext)) {
+	tCtx.Helper()
+	tCtx.Step(step, cb)
+}
+
 // Step is useful when the context with the step information is
 // used more than once:
 //
@@ -52,29 +56,9 @@ func (tc *TC) WithStep(step string) *TC {
 // Inside the callback, the tCtx variable is the one where the step
 // has been added. This avoids the need to introduce multiple different
 // context variables and risk of using the wrong one.
-func Step(tCtx TContext, what string, cb func(tCtx TContext)) {
-	tCtx.Helper()
-	cb(WithStep(tCtx, what))
-}
-
-// TODO: remove Begin+End when not needed anymore
-
-// Deprecated
-func Begin(tCtx TContext, what string) TContext {
-	return WithStep(tCtx, what)
-}
-
-// Deprecated
-func End(tc *TC) TContext {
-	// This is a quick hack to keep End working. Will be removed.
-	tc = tc.clone()
-	index := strings.LastIndex(strings.TrimSuffix(tc.steps, ": "), ": ")
-	if index > 0 {
-		tc.steps = tc.steps[:index+2]
-	} else {
-		tc.steps = ""
-	}
-	return tc
+func (tc *TC) Step(step string, cb func(tCtx TContext)) {
+	tc.Helper()
+	cb(WithStep(tc, step))
 }
 
 // Value intercepts a search for the special "GINKGO_SPEC_CONTEXT" and
@@ -354,8 +354,7 @@ func (c *Cluster) Modify(tCtx ktesting.TContext, options ModifyOptions) ModifyOp
 
 func (c *Cluster) modifyComponent(tCtx ktesting.TContext, options ModifyOptions, component KubeComponentName, restore *ModifyOptions) {
 	tCtx.Helper()
-	tCtx = ktesting.Begin(tCtx, fmt.Sprintf("modify %s", component))
-	defer ktesting.End(tCtx)
+	tCtx = tCtx.WithStep(fmt.Sprintf("modify %s", component))
 
 	// We could also do things like turning feature gates on or off.
 	// For now we only support replacing the file.
@@ -430,8 +429,7 @@ func (c *Cluster) runComponentWithRetry(tCtx ktesting.TContext, cmd *Cmd) {
 func (c *Cluster) checkReadiness(tCtx ktesting.TContext, cmd *Cmd) {
 	restConfig := c.LoadConfig(tCtx)
 	tCtx = ktesting.WithRESTConfig(tCtx, restConfig)
-	tCtx = ktesting.Begin(tCtx, fmt.Sprintf("wait for %s readiness", cmd.Name))
-	defer ktesting.End(tCtx)
+	tCtx = tCtx.WithStep(fmt.Sprintf("wait for %s readiness", cmd.Name))
 
 	switch KubeComponentName(cmd.Name) {
 	case KubeAPIServer:
@@ -446,9 +444,7 @@ func (c *Cluster) checkReadiness(tCtx ktesting.TContext, cmd *Cmd) {
 	c.checkHealthz(tCtx, cmd, "https", c.settings["KUBELET_HOST"], c.settings["KUBELET_PORT"])
 
 	// Also wait for the node to be ready.
-	tCtx = ktesting.Begin(tCtx, "wait for node ready")
-	defer ktesting.End(tCtx)
-	ktesting.Eventually(tCtx, func(tCtx ktesting.TContext) []corev1.Node {
+	ktesting.Eventually(tCtx.WithStep("wait for node ready"), func(tCtx ktesting.TContext) []corev1.Node {
 		nodes, err := tCtx.Client().CoreV1().Nodes().List(tCtx, metav1.ListOptions{})
 		tCtx.ExpectNoError(err, "list nodes")
 		return nodes.Items