Mirror of https://github.com/hashicorp/terraform.git
test: create all edges to leaf cleanup nodes (#37564)
Parent: bbb8af989c
Commit: af8cc7e315
7 changed files with 286 additions and 1 deletion
.changes/v1.13/BUG FIXES-20250905-083104.yaml (new file, 5 lines)

@@ -0,0 +1,5 @@
kind: BUG FIXES
body: "test: Fix the order of execution of cleanup nodes"
time: 2025-09-05T08:31:04.56877+02:00
custom:
  Issue: "37546"
.gitignore (3 lines changed)

@@ -24,4 +24,5 @@ vendor/
coverage.txt

# IDEs
.vscode/
.zed/
@@ -3919,6 +3919,90 @@ func TestTest_JUnitOutput(t *testing.T) {
	}
}

// https://github.com/hashicorp/terraform/issues/37546
func TestTest_TeardownOrder(t *testing.T) {
	td := t.TempDir()
	testCopyDir(t, testFixturePath(path.Join("test", "rds_shared_subnet")), td)
	t.Chdir(td)

	provider := testing_command.NewProvider(nil)
	providerSource, close := newMockProviderSource(t, map[string][]string{
		"test": {"1.0.0"},
	})
	defer close()

	streams, done := terminal.StreamsForTesting(t)
	view := views.NewView(streams)
	ui := new(cli.MockUi)

	meta := Meta{
		testingOverrides: metaOverridesForProvider(provider.Provider),
		Ui:               ui,
		View:             view,
		Streams:          streams,
		ProviderSource:   providerSource,
	}

	init := &InitCommand{
		Meta: meta,
	}

	if code := init.Run(nil); code != 0 {
		output := done(t)
		t.Fatalf("expected status code %d but got %d: %s", 0, code, output.All())
	}

	c := &TestCommand{
		Meta: meta,
	}

	code := c.Run([]string{"-no-color", "-json"})
	if code != 0 {
		t.Errorf("expected status code %d but got %d", 0, code)
	}
	output := done(t).All()

	// Parse the JSON output to check teardown order
	var setupTeardownStart time.Time
	var lastRunTeardownStart time.Time

	for line := range strings.SplitSeq(output, "\n") {
		if strings.Contains(line, `"progress":"teardown"`) {
			var obj map[string]any
			if err := json.Unmarshal([]byte(line), &obj); err != nil {
				t.Fatal(err)
			}
			if strings.Contains(line, `"setup_tests"`) {
				if ts, ok := obj["@timestamp"].(string); ok {
					// record the first time that the setup teardown appears in the output
					if setupTeardownStart.IsZero() {
						parsedTime, _ := time.Parse(time.RFC3339, ts)
						setupTeardownStart = parsedTime
					}
				}
			} else {
				if ts, ok := obj["@timestamp"].(string); ok {
					parsedTime, _ := time.Parse(time.RFC3339, ts)
					// record the last time that a run's teardown appears in the output
					if parsedTime.After(lastRunTeardownStart) {
						lastRunTeardownStart = parsedTime
					}
				}
			}
		}
	}

	// all runs should have finished their teardown before the setup teardown starts
	if lastRunTeardownStart.After(setupTeardownStart) {
		t.Fatalf("setup is tearing down before dependents are done: \n %s", output)
	}

	if provider.ResourceCount() > 0 {
		t.Logf("Resources remaining after test completion (this might indicate the teardown issue): %v", provider.ResourceString())
	}
}

// testModuleInline takes a map of path -> config strings and yields a config
// structure with those files loaded from disk
func testModuleInline(t *testing.T, sources map[string]string) (*configs.Config, string, func()) {
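The test above keys off JSON log lines that contain `"progress":"teardown"` and compares their `@timestamp` fields. As a rough illustration only, the self-contained sketch below shows the timestamp comparison the test performs; the event lines and their field values are hypothetical, and only the field names mirror the test.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// Hypothetical teardown progress lines; field values are made up.
	runLine := `{"@timestamp":"2025-09-05T08:31:08Z","progress":"teardown","run":"rds_with_replica"}`
	setupLine := `{"@timestamp":"2025-09-05T08:31:10Z","progress":"teardown","run":"setup_tests"}`

	// Extract the RFC3339 timestamp from a JSON event line, as the test does.
	parseTS := func(line string) time.Time {
		var obj map[string]any
		if err := json.Unmarshal([]byte(line), &obj); err != nil {
			panic(err)
		}
		ts, _ := time.Parse(time.RFC3339, obj["@timestamp"].(string))
		return ts
	}

	lastRunTeardown := parseTS(runLine)
	setupTeardown := parseTS(setupLine)

	// The assertion the test makes: the setup teardown must not begin
	// before the last dependent run's teardown.
	fmt.Println("ordering ok:", !lastRunTeardown.After(setupTeardown))
}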
internal/command/testdata/test/rds_shared_subnet/main.tf (new file, 37 lines)

@@ -0,0 +1,37 @@
variable "environment" {
  type = string
}

variable "password" {
  type = string
}

variable "vpc_id" {
  type = string
}

variable "db_subnet_group_name" {
  type = string
}

variable "destroy_wait_seconds" {
  type    = number
  default = 0
}

# Simulates the terraform-aws-modules/rds/aws module
# This represents the thin wrapper around the AWS RDS module
resource "test_resource" "db" {
  value = "${var.environment}-${var.db_subnet_group_name}"

  # Add some delay to simulate real RDS creation/deletion time
  destroy_wait_seconds = var.destroy_wait_seconds
}

output "db_instance_id" {
  value = test_resource.db.value
}

output "db_endpoint" {
  value = "${test_resource.db.value}.example.com"
}
internal/command/testdata/test/rds_shared_subnet/tests/create_rds.tftest.hcl (new file, 109 lines)

@@ -0,0 +1,109 @@
test {
  parallel = true
}

provider "test" {
}

run "setup_tests" {
  # This will create a DB subnet group that can be passed to the
  # db_subnet_group_name input of the module under test
  module {
    source = "./tests/setup"
  }
}

run "rds_without_dns_records" {
  command   = apply
  state_key = "rds_without_dns_records"
  variables {
    environment          = "${run.setup_tests.name}0"
    password             = run.setup_tests.password
    db_subnet_group_name = run.setup_tests.subnet_group.name
    vpc_id               = run.setup_tests.vpc_id
    destroy_wait_seconds = 0
  }
}

run "rds_with_replica" {
  command = apply
  providers = {
    test = test
  }
  state_key = "rds_with_replica"
  variables {
    environment          = "${run.setup_tests.name}1"
    password             = run.setup_tests.password
    db_subnet_group_name = run.setup_tests.subnet_group.name
    vpc_id               = run.setup_tests.vpc_id
    destroy_wait_seconds = 1
  }
}

run "rds_instance_three" {
  command   = apply
  state_key = "rds_instance_three"

  providers = {
    test = test
  }

  variables {
    environment          = "${run.setup_tests.name}3"
    password             = run.setup_tests.password
    db_subnet_group_name = run.setup_tests.subnet_group.name
    vpc_id               = run.setup_tests.vpc_id
    destroy_wait_seconds = 1
  }
}

run "rds_instance_four" {
  command   = apply
  state_key = "rds_instance_four"

  providers = {
    test = test
  }

  variables {
    environment          = "${run.setup_tests.name}4"
    password             = run.setup_tests.password
    db_subnet_group_name = run.setup_tests.subnet_group.name
    vpc_id               = run.setup_tests.vpc_id
    destroy_wait_seconds = 1
  }
}

run "rds_instance_five" {
  command   = apply
  state_key = "rds_instance_five"

  providers = {
    test = test
  }

  variables {
    environment          = "${run.setup_tests.name}5"
    password             = run.setup_tests.password
    db_subnet_group_name = run.setup_tests.subnet_group.name
    vpc_id               = run.setup_tests.vpc_id
    destroy_wait_seconds = 1
  }
}

run "rds_instance_six" {
  command   = apply
  state_key = "rds_instance_six"

  providers = {
    test = test
  }

  variables {
    environment          = "${run.setup_tests.name}6"
    password             = run.setup_tests.password
    db_subnet_group_name = run.setup_tests.subnet_group.name
    vpc_id               = run.setup_tests.vpc_id
    destroy_wait_seconds = 1
  }
}
internal/command/testdata/test/rds_shared_subnet/tests/setup/main.tf (new file, 45 lines)

@@ -0,0 +1,45 @@
resource "test_resource" "name" {
  value = "tftest-delete-me-normal-haddock"
}

output "name" {
  value = test_resource.name.value
}

resource "test_resource" "vpc" {
  value = "vpc-0ae7a165e6927405b"
}

output "vpc_id" {
  value = test_resource.vpc.value
}

resource "test_resource" "subnet_b" {
  value = "subnet-eu-west-1b-${test_resource.vpc.value}"
}

resource "test_resource" "subnet_c" {
  value = "subnet-eu-west-1c-${test_resource.vpc.value}"
}

resource "test_resource" "subnet_group" {
  value = "${test_resource.name.value}-${test_resource.subnet_b.value}-${test_resource.subnet_c.value}"
  # Add delay to simulate real AWS resource creation/deletion time
  destroy_wait_seconds = 2
}

output "subnet_group" {
  value = {
    name = test_resource.subnet_group.value
    id   = test_resource.subnet_group.value
  }
}

resource "test_resource" "password" {
  value = "supersecretpassword123"
}

output "password" {
  value     = test_resource.password.value
  sensitive = true
}
@ -111,6 +111,10 @@ func (t *TestStateCleanupTransformer) depthFirstTraverse(g *terraform.Graph, nod
|
|||
if visited[node.stateKey] {
|
||||
return
|
||||
}
|
||||
// don't mark the node as visited if it's a leaf node, this ensures that other dependencies are still added to it
|
||||
if len(depStateKeys[node.stateKey]) == 0 {
|
||||
return
|
||||
}
|
||||
visited[node.stateKey] = true
|
||||
|
||||
for _, refStateKey := range depStateKeys[node.stateKey] {
|
||||
|
|
|
|||
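For context on why the leaf check matters, here is a minimal, self-contained sketch of the idea behind the hunk above. It is not the real transformer: the graph, node, and edge types are simplified to plain maps and string pairs, and the traversal shape is an assumption. The point it illustrates is the commit title: a cleanup node with no dependencies of its own (such as the node holding the setup_tests state) must stay unvisited so that every dependent's traversal still reaches it and records its own edge, and the shared state is torn down only after all dependents.

package main

import "fmt"

func main() {
	// Assumed simplified shape: state key -> state keys it depends on.
	depStateKeys := map[string][]string{
		"rds_without_dns_records": {"setup_tests"},
		"rds_with_replica":        {"setup_tests"},
		"setup_tests":             {}, // leaf cleanup node
	}

	visited := map[string]bool{}
	var edges [][2]string // [dependent, dependency] pairs in the cleanup graph

	var traverse func(from, key string)
	traverse = func(from, key string) {
		if visited[key] {
			// A visited node is skipped entirely, so without the leaf
			// exception below a second dependent would never record its edge.
			return
		}
		if from != "" {
			// Record the edge from the dependent to this cleanup node.
			edges = append(edges, [2]string{from, key})
		}
		// Mirroring the fix: a leaf is never marked visited, so the next
		// dependent's traversal still reaches the edge-recording step above.
		if len(depStateKeys[key]) == 0 {
			return
		}
		visited[key] = true
		for _, dep := range depStateKeys[key] {
			traverse(key, dep)
		}
	}

	for key := range depStateKeys {
		traverse("", key)
	}

	// Both rds_* runs end up with an edge to setup_tests, so the shared
	// setup state tears down only after both dependents have finished.
	fmt.Println(edges)
}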