Add sequential split-server test

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
Brad Davidson 2025-09-17 22:03:38 +00:00 committed by Brad Davidson
parent bfdcc7bcc8
commit a0ce2aaeaf
5 changed files with 60 additions and 36 deletions

.gitignore (vendored): 1 change

@@ -30,6 +30,7 @@ __pycache__
/tests/.tox/
/tests/.vscode
/tests/**/*log.txt
/tests/**/vagrant.log
/sonobuoy-output
*.tmp
config/local.tfvars


@@ -38,7 +38,8 @@ FROM vagrantlibvirt/vagrant-libvirt:sha-a94ce0d AS test-e2e
RUN apt-get update && apt-get install -y docker.io wget
ENV VAGRANT_DISABLE_STRICT_DEPENDENCY_ENFORCEMENT=1
RUN vagrant plugin install vagrant-k3s vagrant-reload vagrant-scp
RUN vagrant plugin install vagrant-k3s --plugin-version 0.4.0
RUN vagrant plugin install vagrant-reload vagrant-scp
# Workaround for older vagrant-libvirt image and new vagrant infra websites
# See https://github.com/hashicorp/vagrant/issues/13571 and


@@ -46,6 +46,7 @@ def provision(vm, role, role_num, node_num)
YAML
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.skip_start = true
end
elsif role.include?("server") && role.include?("etcd") && role_num != 0
vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
@@ -62,6 +63,7 @@ def provision(vm, role, role_num, node_num)
YAML
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.skip_start = true
end
elsif role.include?("server") && role.include?("cp")
vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
@@ -75,6 +77,7 @@ def provision(vm, role, role_num, node_num)
YAML
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.skip_start = true
end
elsif role.include?("server") && role.include?("all")
vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
@@ -85,6 +88,7 @@ def provision(vm, role, role_num, node_num)
YAML
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.skip_start = true
end
end
if role.include?("agent")
@@ -92,6 +96,7 @@ def provision(vm, role, role_num, node_num)
k3s.args = %W[agent --server https://#{NETWORK_PREFIX}.101:6443 --flannel-iface=eth1]
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.skip_start = true
end
end
if vm.box.to_s.include?("microos")
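
With k3s.skip_start = true on every provisioner, vagrant-k3s installs k3s but leaves the systemd units stopped, so the test decides when and how each node starts. A minimal sketch of that startup step, reusing the e2e.VagrantNode.RunCmdOnNode helper that appears later in this diff; the startK3sUnits name and the fmt import wiring are illustrative, not part of this commit:

// Start the k3s or k3s-agent unit on each node. Passing "--no-block" makes
// systemctl return before the unit finishes activating; an empty startFlags
// makes each call wait until the unit is active.
func startK3sUnits(nodes []e2e.VagrantNode, unit, startFlags string) error {
	for _, node := range nodes {
		cmd := fmt.Sprintf("systemctl start %s %s", unit, startFlags)
		if _, err := node.RunCmdOnNode(cmd); err != nil {
			return fmt.Errorf("failed to start %s on %s: %w", unit, node.String(), err)
		}
	}
	return nil
}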


@@ -78,7 +78,7 @@ func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount
return err
})
// libVirt/Virtualbox needs some time between provisioning nodes
time.Sleep(10 * time.Second)
time.Sleep(2 * time.Second)
}
if err := errg.Wait(); err != nil {
return etcdNodes, cpNodes, agentNodes, err
@@ -105,8 +105,8 @@ func createSplitCluster(nodeOS string, etcdCount, controlPlaneCount, agentCount
_, err := e2e.RunCommand(cmd)
return err
})
// K3s needs some time between joining nodes to avoid learner issues
time.Sleep(10 * time.Second)
// libVirt/Virtualbox needs some time between provisioning nodes
time.Sleep(2 * time.Second)
}
if err := errg.Wait(); err != nil {
return etcdNodes, cpNodes, agentNodes, err
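
Both hunks above only shorten the pause between node launches from 10 to 2 seconds; the provisioning loop itself is unchanged. As a standalone sketch of that staggered-parallel pattern, assuming golang.org/x/sync/errgroup and a hypothetical nodeNames slice (e2e.RunCommand is the helper already shown in the hunk):

// Launch each node's provisioning in its own goroutine, but stagger the
// launches so libvirt/VirtualBox is not asked to create every VM at the
// same instant. errg.Wait() surfaces the first error from any goroutine.
var errg errgroup.Group
for _, name := range nodeNames {
	cmd := "vagrant up " + name // illustrative; the real test builds a fuller command
	errg.Go(func() error {
		_, err := e2e.RunCommand(cmd)
		return err
	})
	time.Sleep(2 * time.Second)
}
if err := errg.Wait(); err != nil {
	return etcdNodes, cpNodes, agentNodes, err
}
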
@@ -121,17 +121,18 @@ func Test_E2ESplitServer(t *testing.T) {
RunSpecs(t, "Split Server Test Suite", suiteConfig, reporterConfig)
}
var (
tc *e2e.TestConfig // We don't use the Server and Agents from this
etcdNodes []e2e.VagrantNode
cpNodes []e2e.VagrantNode
agentNodes []e2e.VagrantNode
allNodes []e2e.VagrantNode
)
var _ = ReportAfterEach(e2e.GenReport)
var _ = Describe("Verify Create", Ordered, func() {
var _ = DescribeTableSubtree("Verify Create", Ordered, func(startFlags string) {
var (
tc *e2e.TestConfig // We don't use the Server and Agents from this
etcdNodes []e2e.VagrantNode
cpNodes []e2e.VagrantNode
agentNodes []e2e.VagrantNode
allNodes []e2e.VagrantNode
failed bool
)
Context("Cluster :", func() {
It("Starts up with no issues", func() {
var err error
@@ -142,22 +143,36 @@ var _ = Describe("Verify Create", Ordered, func() {
fmt.Println("Etcd Server Nodes:", etcdNodes)
fmt.Println("Control Plane Server Nodes:", cpNodes)
fmt.Println("Agent Nodes:", agentNodes)
kubeConfigFile, err := e2e.GenKubeconfigFile(cpNodes[0].String())
tc = &e2e.TestConfig{
KubeconfigFile: kubeConfigFile,
Hardened: *hardened,
for _, node := range append(etcdNodes, cpNodes...) {
cmd := fmt.Sprintf("systemctl start k3s %s", startFlags)
_, err := node.RunCmdOnNode(cmd)
Expect(err).NotTo(HaveOccurred(), "failed to start k3s")
}
Expect(err).NotTo(HaveOccurred())
for _, node := range agentNodes {
cmd := fmt.Sprintf("systemctl start k3s-agent %s", startFlags)
_, err := node.RunCmdOnNode(cmd)
Expect(err).NotTo(HaveOccurred(), "failed to start k3s-agent")
}
Eventually(func() error {
kubeConfigFile, err := e2e.GenKubeconfigFile(cpNodes[0].String())
tc = &e2e.TestConfig{
KubeconfigFile: kubeConfigFile,
Hardened: *hardened,
}
return err
}, "60s", "5s").Should(Succeed(), "failed to get admin kubeconfig")
})
It("Checks node and pod status", func() {
allNodes = append(cpNodes, etcdNodes...)
allNodes = append(allNodes, agentNodes...)
By("Fetching Nodes status")
fmt.Printf("\nFetching Nodes status\n")
Eventually(func() error {
return tests.NodesReady(tc.KubeconfigFile, e2e.VagrantSlice(allNodes))
}, "620s", "5s").Should(Succeed())
e2e.DumpNodes(tc.KubeconfigFile)
fmt.Printf("\nFetching Pods status\n")
Eventually(func() error {
return tests.AllPodsUp(tc.KubeconfigFile)
}, "620s", "5s").Should(Succeed())
@@ -266,21 +281,23 @@ var _ = Describe("Verify Create", Ordered, func() {
}, "420s", "2s").Should(ContainSubstring("kubernetes.default.svc.cluster.local"), "failed cmd: "+cmd)
})
})
})
var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})
AfterAll(func() {
failed = failed || CurrentSpecReport().Failed()
})
var _ = AfterSuite(func() {
if failed {
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, allNodes))
} else {
Expect(e2e.GetCoverageReport(allNodes)).To(Succeed())
}
if !failed || *ci {
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(tc.KubeconfigFile)).To(Succeed())
}
})
AfterAll(func() {
if failed {
AddReportEntry("journald-logs", e2e.TailJournalLogs(1000, allNodes))
} else {
Expect(e2e.GetCoverageReport(allNodes)).To(Succeed())
}
if !failed || *ci {
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(tc.KubeconfigFile)).To(Succeed())
}
})
},
Entry("concurrently", "--no-block"),
Entry("sequentially", ""),
)
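
The Describe container is now a Ginkgo v2 DescribeTableSubtree, so the entire split-server suite runs twice: once starting the units with systemctl --no-block (all nodes activate concurrently) and once without it (each systemctl call waits for the unit to become active before the next node is started). A minimal standalone sketch of that pattern, assuming the usual ginkgo/gomega dot-imports plus fmt and strings; all names here are illustrative:

// Every It inside the container runs once per Entry, with startFlags bound
// to that Entry's parameter.
var _ = DescribeTableSubtree("start mode", Ordered, func(startFlags string) {
	It("builds the systemctl command for this entry", func() {
		cmd := strings.TrimSpace(fmt.Sprintf("systemctl start k3s %s", startFlags))
		Expect(cmd).To(HavePrefix("systemctl start k3s"))
	})
},
	Entry("concurrently", "--no-block"),
	Entry("sequentially", ""),
)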


@@ -58,7 +58,7 @@ var _ = Describe("Verify Create", Ordered, func() {
})
It("Checks node and pod status", func() {
fmt.Printf("\nFetching node status\n")
fmt.Printf("\nFetching Nodes status\n")
Eventually(func() error {
return tests.NodesReady(tc.KubeconfigFile, e2e.VagrantSlice(tc.AllNodes()))
}, "620s", "5s").Should(Succeed())