Mirror of https://github.com/hashicorp/terraform.git, synced 2026-02-03 20:50:59 -05:00
command/meta: Enable migration from PSS to a backend (#37949)
Checks pending at time of capture: build and Quick Checks workflows for this push (waiting to run or blocked by required conditions).
* command/meta: Enable migration from PSS to a backend
* Address PR feedback
* Update internal/command/meta_backend.go
  Co-authored-by: Sarah French <15078782+SarahFrench@users.noreply.github.com>
* meta_backend: Rename stateStore_c_S to stateStore_to_backend
---------
Co-authored-by: Sarah French <15078782+SarahFrench@users.noreply.github.com>
parent: b2aad914f1
commit: 213d133d86
7 changed files with 361 additions and 152 deletions
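What this change enables in practice: a working directory initialized with the experimental pluggable state storage (PSS) state_store block can now be re-initialized against an ordinary backend block, and terraform init migrates the stored state across. A minimal sketch of the two configurations on either side of that migration follows. The state_store block shape and the "test_store" name are illustrative assumptions about the experiment's syntax, not taken from this commit; the backend "http" block mirrors the test added below.

Before (experimental PSS, requires -enable-pluggable-state-storage-experiment=true):

  terraform {
    required_providers {
      test = {
        source  = "hashicorp/test"
        version = "1.2.3"
      }
    }
    # Hypothetical store name; block layout follows the PSS experiment and is illustrative only.
    state_store "test_store" {
      provider "test" {}
    }
  }

After (regular backend; re-running terraform init, optionally with -force-copy, performs the state migration):

  terraform {
    backend "http" {
      address = "https://example.com"
    }
  }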
@@ -4,13 +4,9 @@
 package http

 import (
-	"bytes"
-	"fmt"
-	"io"
 	"net/http"
 	"net/http/httptest"
 	"net/url"
-	"reflect"
 	"testing"

 	"github.com/hashicorp/go-retryablehttp"
@@ -23,7 +19,7 @@ func TestHTTPClient_impl(t *testing.T) {
 }

 func TestHTTPClient(t *testing.T) {
-	handler := new(testHTTPHandler)
+	handler := new(TestHTTPBackend)
 	ts := httptest.NewServer(http.HandlerFunc(handler.Handle))
 	defer ts.Close()

@@ -66,7 +62,7 @@ func TestHTTPClient(t *testing.T) {
 	remote.TestRemoteLocks(t, a, b)

 	// test a WebDAV-ish backend
-	davhandler := new(testHTTPHandler)
+	davhandler := new(TestHTTPBackend)
 	ts = httptest.NewServer(http.HandlerFunc(davhandler.HandleWebDAV))
 	defer ts.Close()

@@ -84,8 +80,8 @@ func TestHTTPClient(t *testing.T) {
 	remote.TestClient(t, client) // second time, with identical data: 204

 	// test a broken backend
-	brokenHandler := new(testBrokenHTTPHandler)
-	brokenHandler.handler = new(testHTTPHandler)
+	brokenHandler := new(TestBrokenHTTPBackend)
+	brokenHandler.handler = new(TestHTTPBackend)
 	ts = httptest.NewServer(http.HandlerFunc(brokenHandler.Handle))
 	defer ts.Close()

@@ -97,77 +93,12 @@ func TestHTTPClient(t *testing.T) {
 	remote.TestClient(t, client)
 }

-type testHTTPHandler struct {
-	Data   []byte
-	Locked bool
-}
-
-func (h *testHTTPHandler) Handle(w http.ResponseWriter, r *http.Request) {
-	switch r.Method {
-	case "GET":
-		w.Write(h.Data)
-	case "PUT":
-		buf := new(bytes.Buffer)
-		if _, err := io.Copy(buf, r.Body); err != nil {
-			w.WriteHeader(500)
-		}
-		w.WriteHeader(201)
-		h.Data = buf.Bytes()
-	case "POST":
-		buf := new(bytes.Buffer)
-		if _, err := io.Copy(buf, r.Body); err != nil {
-			w.WriteHeader(500)
-		}
-		h.Data = buf.Bytes()
-	case "LOCK":
-		if h.Locked {
-			w.WriteHeader(423)
-		} else {
-			h.Locked = true
-		}
-	case "UNLOCK":
-		h.Locked = false
-	case "DELETE":
-		h.Data = nil
-		w.WriteHeader(200)
-	default:
-		w.WriteHeader(500)
-		w.Write([]byte(fmt.Sprintf("Unknown method: %s", r.Method)))
-	}
-}
-
-// mod_dav-ish behavior
-func (h *testHTTPHandler) HandleWebDAV(w http.ResponseWriter, r *http.Request) {
-	switch r.Method {
-	case "GET":
-		w.Write(h.Data)
-	case "PUT":
-		buf := new(bytes.Buffer)
-		if _, err := io.Copy(buf, r.Body); err != nil {
-			w.WriteHeader(500)
-		}
-		if reflect.DeepEqual(h.Data, buf.Bytes()) {
-			h.Data = buf.Bytes()
-			w.WriteHeader(204)
-		} else {
-			h.Data = buf.Bytes()
-			w.WriteHeader(201)
-		}
-	case "DELETE":
-		h.Data = nil
-		w.WriteHeader(200)
-	default:
-		w.WriteHeader(500)
-		w.Write([]byte(fmt.Sprintf("Unknown method: %s", r.Method)))
-	}
-}
-
-type testBrokenHTTPHandler struct {
+type TestBrokenHTTPBackend struct {
 	lastRequestWasBroken bool
-	handler              *testHTTPHandler
+	handler              *TestHTTPBackend
 }

-func (h *testBrokenHTTPHandler) Handle(w http.ResponseWriter, r *http.Request) {
+func (h *TestBrokenHTTPBackend) Handle(w http.ResponseWriter, r *http.Request) {
 	if h.lastRequestWasBroken {
 		h.lastRequestWasBroken = false
 		h.handler.Handle(w, r)

internal/backend/remote-state/http/test_backend.go (new file, 92 lines)
@@ -0,0 +1,92 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+package http
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"reflect"
+)
+
+type TestHTTPBackend struct {
+	Data   []byte
+	Locked bool
+
+	GetCalled    int
+	PutCalled    int
+	PostCalled   int
+	LockCalled   int
+	UnlockCalled int
+	DeleteCalled int
+}
+
+func (h *TestHTTPBackend) Handle(w http.ResponseWriter, r *http.Request) {
+	switch r.Method {
+	case "GET":
+		h.GetCalled++
+		w.Write(h.Data)
+	case "PUT":
+		h.PutCalled++
+		buf := new(bytes.Buffer)
+		if _, err := io.Copy(buf, r.Body); err != nil {
+			w.WriteHeader(500)
+		}
+		w.WriteHeader(201)
+		h.Data = buf.Bytes()
+	case "POST":
+		h.PostCalled++
+		buf := new(bytes.Buffer)
+		if _, err := io.Copy(buf, r.Body); err != nil {
+			w.WriteHeader(500)
+		}
+		h.Data = buf.Bytes()
+	case "LOCK":
+		h.LockCalled++
+		if h.Locked {
+			w.WriteHeader(423)
+		} else {
+			h.Locked = true
+		}
+	case "UNLOCK":
+		h.UnlockCalled++
+		h.Locked = false
+	case "DELETE":
+		h.DeleteCalled++
+		h.Data = nil
+		w.WriteHeader(200)
+	default:
+		w.WriteHeader(500)
+		w.Write([]byte(fmt.Sprintf("Unknown method: %s", r.Method)))
+	}
+}
+
+// mod_dav-ish behavior
+func (h *TestHTTPBackend) HandleWebDAV(w http.ResponseWriter, r *http.Request) {
+	switch r.Method {
+	case "GET":
+		h.GetCalled++
+		w.Write(h.Data)
+	case "PUT":
+		h.PutCalled++
+		buf := new(bytes.Buffer)
+		if _, err := io.Copy(buf, r.Body); err != nil {
+			w.WriteHeader(500)
+		}
+		if reflect.DeepEqual(h.Data, buf.Bytes()) {
+			h.Data = buf.Bytes()
+			w.WriteHeader(204)
+		} else {
+			h.Data = buf.Bytes()
+			w.WriteHeader(201)
+		}
+	case "DELETE":
+		h.DeleteCalled++
+		h.Data = nil
+		w.WriteHeader(200)
+	default:
+		w.WriteHeader(500)
+		w.Write([]byte(fmt.Sprintf("Unknown method: %s", r.Method)))
+	}
+}
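Note that this helper lives in test_backend.go rather than a _test.go file, so it is importable by tests in other packages (the init test further down imports it as httpBackend and asserts on its Data and GetCalled/PostCalled counters). A rough sketch of the backend configuration such a test writes, where the address is whatever httptest.NewServer returned — the literal URL below is only a placeholder, not a value from this commit:

  terraform {
    backend "http" {
      # In the tests, address is set to ts.URL from httptest.NewServer;
      # this literal is illustrative only.
      address = "http://127.0.0.1:8080"
    }
  }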
@@ -6,6 +6,8 @@ import (
 	"encoding/json"
 	"fmt"
 	"log"
+	"net/http"
+	"net/http/httptest"
 	"os"
 	"path/filepath"
 	"regexp"
@@ -21,6 +23,7 @@

 	"github.com/hashicorp/terraform/internal/addrs"
 	"github.com/hashicorp/terraform/internal/backend"
+	httpBackend "github.com/hashicorp/terraform/internal/backend/remote-state/http"
 	"github.com/hashicorp/terraform/internal/command/arguments"
 	"github.com/hashicorp/terraform/internal/command/clistate"
 	"github.com/hashicorp/terraform/internal/command/views"
@@ -4112,6 +4115,9 @@ func TestInit_stateStore_unset(t *testing.T) {
 		if !s.StateStore.Empty() {
 			t.Fatal("should not have StateStore config")
 		}
+		if !s.Backend.Empty() {
+			t.Fatalf("expected empty Backend config after unsetting state store, found: %#v", s.Backend)
+		}
 	}
 }

@@ -4211,6 +4217,211 @@ func TestInit_stateStore_unset_withoutProviderRequirements(t *testing.T) {
 		if !s.StateStore.Empty() {
 			t.Fatal("should not have StateStore config")
 		}
+		if !s.Backend.Empty() {
+			t.Fatalf("expected empty Backend config after unsetting state store, found: %#v", s.Backend)
+		}
 	}
 }

+func TestInit_stateStore_to_backend(t *testing.T) {
+	// Create a temporary working directory that is empty
+	td := t.TempDir()
+	testCopyDir(t, testFixturePath("init-state-store"), td)
+	t.Chdir(td)
+
+	mockProvider := mockPluggableStateStorageProvider()
+	mockProviderAddress := addrs.NewDefaultProvider("test")
+	providerSource, close := newMockProviderSource(t, map[string][]string{
+		"hashicorp/test": {"1.2.3"}, // Matches provider version in backend state file fixture
+	})
+	defer close()
+
+	tOverrides := &testingOverrides{
+		Providers: map[addrs.Provider]providers.Factory{
+			mockProviderAddress: providers.FactoryFixed(mockProvider),
+		},
+	}
+
+	{
+		log.Printf("[TRACE] TestInit_stateStore_to_backend: beginning first init")
+		// Init
+		ui := cli.NewMockUi()
+		view, done := testView(t)
+		c := &InitCommand{
+			Meta: Meta{
+				testingOverrides: tOverrides,
+				ProviderSource: providerSource,
+				Ui: ui,
+				View: view,
+				AllowExperimentalFeatures: true,
+			},
+		}
+		args := []string{
+			"-enable-pluggable-state-storage-experiment=true",
+		}
+		code := c.Run(args)
+		testOutput := done(t)
+		if code != 0 {
+			t.Fatalf("bad: \n%s", testOutput.All())
+		}
+		log.Printf("[TRACE] TestInit_stateStore_to_backend: first init complete")
+		t.Logf("First run output:\n%s", testOutput.Stdout())
+		t.Logf("First run errors:\n%s", testOutput.Stderr())
+
+		if _, err := os.Stat(filepath.Join(DefaultDataDir, DefaultStateFilename)); err != nil {
+			t.Fatalf("err: %s", err)
+		}
+	}
+	{
+		// run apply to ensure state isn't empty
+		// to bypass edge case handling which causes empty state to stop migration
+		log.Printf("[TRACE] TestInit_stateStore_to_backend: beginning apply")
+		ui := cli.NewMockUi()
+		aView, aDone := testView(t)
+		cApply := &ApplyCommand{
+			Meta: Meta{
+				testingOverrides: tOverrides,
+				ProviderSource: providerSource,
+				Ui: ui,
+				View: aView,
+				AllowExperimentalFeatures: true,
+			},
+		}
+		aCode := cApply.Run([]string{"-auto-approve"})
+		aTestOutput := aDone(t)
+		if aCode != 0 {
+			t.Fatalf("bad: \n%s", aTestOutput.All())
+		}
+
+		t.Logf("Apply output:\n%s", aTestOutput.Stdout())
+		t.Logf("Apply errors:\n%s", aTestOutput.Stderr())
+	}
+	{
+		log.Printf("[TRACE] TestInit_stateStore_to_backend: beginning uninitialised apply")
+
+		backendCfg := []byte(`terraform {
+  backend "http" {
+    address = "https://example.com"
+  }
+}
+`)
+		if err := os.WriteFile("main.tf", backendCfg, 0644); err != nil {
+			t.Fatalf("err: %s", err)
+		}
+
+		ui := cli.NewMockUi()
+		view, done := testView(t)
+		cApply := &ApplyCommand{
+			Meta: Meta{
+				testingOverrides: tOverrides,
+				ProviderSource: providerSource,
+				Ui: ui,
+				View: view,
+				AllowExperimentalFeatures: true,
+			},
+		}
+		code := cApply.Run([]string{"-auto-approve"})
+		testOutput := done(t)
+		if code == 0 {
+			t.Fatalf("expected apply to fail: \n%s", testOutput.All())
+		}
+		log.Printf("[TRACE] TestInit_stateStore_to_backend: apply complete")
+		expectedErr := "Backend initialization required"
+		if !strings.Contains(testOutput.Stderr(), expectedErr) {
+			t.Fatalf("unexpected error, expected %q, given: %q", expectedErr, testOutput.Stderr())
+		}
+
+		log.Printf("[TRACE] TestInit_stateStore_to_backend: uninitialised apply complete")
+		t.Logf("First run output:\n%s", testOutput.Stdout())
+		t.Logf("First run errors:\n%s", testOutput.Stderr())
+
+		if _, err := os.Stat(filepath.Join(DefaultDataDir, DefaultStateFilename)); err != nil {
+			t.Fatalf("err: %s", err)
+		}
+	}
+	{
+		log.Printf("[TRACE] TestInit_stateStore_to_backend: beginning second init")
+
+		testBackend := new(httpBackend.TestHTTPBackend)
+		ts := httptest.NewServer(http.HandlerFunc(testBackend.Handle))
+		defer ts.Close()
+
+		t.Cleanup(ts.Close)
+
+		// Override state store to backend
+		backendCfg := fmt.Sprintf(`terraform {
+  backend "http" {
+    address = %q
+  }
+}
+`, ts.URL)
+		if err := os.WriteFile("main.tf", []byte(backendCfg), 0644); err != nil {
+			t.Fatalf("err: %s", err)
+		}
+
+		ui := cli.NewMockUi()
+		view, done := testView(t)
+		c := &InitCommand{
+			Meta: Meta{
+				testingOverrides: &testingOverrides{
+					Providers: map[addrs.Provider]providers.Factory{
+						mockProviderAddress: providers.FactoryFixed(mockProvider),
+					},
+				},
+				ProviderSource: providerSource,
+				Ui: ui,
+				View: view,
+				AllowExperimentalFeatures: true,
+			},
+		}
+
+		args := []string{
+			"-enable-pluggable-state-storage-experiment=true",
+			"-force-copy",
+		}
+		code := c.Run(args)
+		testOutput := done(t)
+		if code != 0 {
+			t.Fatalf("bad: \n%s", testOutput.All())
+		}
+		log.Printf("[TRACE] TestInit_stateStore_to_backend: second init complete")
+		t.Logf("Second run output:\n%s", testOutput.Stdout())
+		t.Logf("Second run errors:\n%s", testOutput.Stderr())
+
+		s := testDataStateRead(t, filepath.Join(DefaultDataDir, DefaultStateFilename))
+		if !s.StateStore.Empty() {
+			t.Fatal("should not have StateStore config")
+		}
+		if s.Backend.Empty() {
+			t.Fatalf("expected backend to not be empty")
+		}
+
+		data, err := statefile.Read(bytes.NewBuffer(testBackend.Data))
+		if err != nil {
+			t.Fatal(err)
+		}
+		expectedOutputs := map[string]*states.OutputValue{
+			"test": &states.OutputValue{
+				Addr: addrs.AbsOutputValue{
+					OutputValue: addrs.OutputValue{
+						Name: "test",
+					},
+				},
+				Value: cty.StringVal("test"),
+			},
+		}
+		if diff := cmp.Diff(expectedOutputs, data.State.RootOutputValues); diff != "" {
+			t.Fatalf("unexpected data: %s", diff)
+		}
+
+		expectedGetCalls := 4
+		if testBackend.GetCalled != expectedGetCalls {
+			t.Fatalf("expected %d GET calls, got %d", expectedGetCalls, testBackend.GetCalled)
+		}
+		expectedPostCalls := 1
+		if testBackend.PostCalled != expectedPostCalls {
+			t.Fatalf("expected %d POST calls, got %d", expectedPostCalls, testBackend.PostCalled)
+		}
+	}
+}
@@ -4411,9 +4622,9 @@ func mockPluggableStateStorageProvider() *testing_provider.MockProvider {
 		if mock.MockStates == nil {
 			mock.MockStates = make(map[string]interface{})
+		}

-			mock.MockStates[req.StateId] = req.Bytes
-		}
+		mock.MockStates[req.StateId] = req.Bytes

 		return providers.WriteStateBytesResponse{
 			Diagnostics: nil, // success
 		}

@@ -836,7 +836,17 @@ func (m *Meta) backendFromConfig(opts *BackendOpts) (backend.Backend, tfdiags.Di
 			return nil, diags
 		}

-		return m.stateStore_c_S(sMgr, opts.ViewType)
+		// Grab a purely local backend to be the destination for migrated state
+		localB, moreDiags := m.Backend(&BackendOpts{ForceLocal: true, Init: true})
+		diags = diags.Append(moreDiags)
+		if moreDiags.HasErrors() {
+			return nil, diags
+		}
+
+		v := views.NewInit(opts.ViewType, m.View)
+		v.Output(views.InitMessageCode("state_store_unset"), s.StateStore.Type)
+
+		return m.stateStore_to_backend(sMgr, "local", localB, nil, opts.ViewType)

 	// Configuring a backend for the first time or -reconfigure flag was used
 	case backendConfig != nil && s.Backend.Empty() &&
@@ -884,11 +894,30 @@ func (m *Meta) backendFromConfig(opts *BackendOpts) (backend.Backend, tfdiags.Di
 			s.StateStore.Provider.Source,
 			backendConfig.Type,
 		)
-		return nil, diags.Append(&hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  "Not implemented yet",
-			Detail:   "Migration from state store to backend is not implemented yet",
-		})
+
+		if !opts.Init {
+			initReason := fmt.Sprintf("Migrating from state store %q to backend %q",
+				s.StateStore.Type, backendConfig.Type)
+			diags = diags.Append(errBackendInitDiag(initReason))
+			return nil, diags
+		}
+
+		b, configVal, moreDiags := m.backendInitFromConfig(backendConfig)
+		diags = diags.Append(moreDiags)
+		if moreDiags.HasErrors() {
+			return nil, diags
+		}
+
+		v := views.NewInit(opts.ViewType, m.View)
+		v.Output(views.InitMessageCode("state_store_migrate_backend"), s.StateStore.Type, backendConfig.Type)
+
+		newBackendCfgState := &workdir.BackendConfigState{
+			Type: backendConfig.Type,
+		}
+		newBackendCfgState.SetConfig(configVal, b.ConfigSchema())
+		newBackendCfgState.Hash = uint64(cHash)
+
+		return m.stateStore_to_backend(sMgr, backendConfig.Type, b, newBackendCfgState, opts.ViewType)

 	// Migration from backend to state store
 	case backendConfig == nil && !s.Backend.Empty() &&
@@ -1910,8 +1939,8 @@ func (m *Meta) stateStore_C_s(c *configs.StateStore, stateStoreHash int, backend
 	return b, diags
 }

-// Unconfiguring a state store (moving from state store => local).
-func (m *Meta) stateStore_c_S(ssSMgr *clistate.LocalState, viewType arguments.ViewType) (backend.Backend, tfdiags.Diagnostics) {
+// Migrating a state store to backend (including local).
+func (m *Meta) stateStore_to_backend(ssSMgr *clistate.LocalState, dstBackendType string, dstBackend backend.Backend, newBackendState *workdir.BackendConfigState, viewType arguments.ViewType) (backend.Backend, tfdiags.Diagnostics) {
 	var diags tfdiags.Diagnostics

 	s := ssSMgr.State()
@@ -1920,13 +1949,6 @@ func (m *Meta) stateStore_c_S(ssSMgr *clistate.LocalState, viewType arguments.Vi
 	view := views.NewInit(viewType, m.View)
 	view.Output(views.StateMigrateLocalMessage, stateStoreType)

-	// Grab a purely local backend to get the local state if it exists
-	localB, moreDiags := m.Backend(&BackendOpts{ForceLocal: true, Init: true})
-	diags = diags.Append(moreDiags)
-	if moreDiags.HasErrors() {
-		return nil, diags
-	}
-
 	// Initialize the configured state store
 	ss, moreDiags := m.savedStateStore(ssSMgr)
 	diags = diags.Append(moreDiags)
@@ -1937,9 +1959,9 @@ func (m *Meta) stateStore_c_S(ssSMgr *clistate.LocalState, viewType arguments.Vi
 	// Perform the migration
 	err := m.backendMigrateState(&backendMigrateOpts{
 		SourceType:      stateStoreType,
-		DestinationType: "local",
+		DestinationType: dstBackendType,
 		Source:          ss,
-		Destination:     localB,
+		Destination:     dstBackend,
 		ViewType:        viewType,
 	})
 	if err != nil {
@@ -1949,6 +1971,7 @@ func (m *Meta) stateStore_c_S(ssSMgr *clistate.LocalState, viewType arguments.Vi

 	// Remove the stored metadata
 	s.StateStore = nil
+	s.Backend = newBackendState
 	if err := ssSMgr.WriteState(s); err != nil {
 		diags = diags.Append(errStateStoreClearSaved{err})
 		return nil, diags
@@ -1958,11 +1981,8 @@ func (m *Meta) stateStore_c_S(ssSMgr *clistate.LocalState, viewType arguments.Vi
 		return nil, diags
 	}

-	v := views.NewInit(viewType, m.View)
-	v.Output(views.InitMessageCode("state_store_unset"), stateStoreType)
-
-	// Return no state store
-	return nil, diags
+	// Return backend
+	return dstBackend, diags
 }

 // getStateStorageProviderVersion gets the current version of the state store provider that's in use. This is achieved

@@ -2136,58 +2136,6 @@ func TestMetaBackend_configuredBackendToStateStore(t *testing.T) {
 	}
 }

-// Changing from using state_store to backend
-//
-// TODO(SarahFrench/radeksimko): currently this test only confirms that we're hitting the switch
-// case for this scenario, and will need to be updated when that init feature is implemented.
-func TestMetaBackend_configuredStateStoreToBackend(t *testing.T) {
-	td := t.TempDir()
-	testCopyDir(t, testFixturePath("state-store-to-backend"), td)
-	t.Chdir(td)
-
-	// Setup the meta
-	m := testMetaBackend(t, nil)
-	m.AllowExperimentalFeatures = true
-
-	// Get the backend's config
-	mod, loadDiags := m.loadSingleModule(td)
-	if loadDiags.HasErrors() {
-		t.Fatalf("unexpected error when loading test config: %s", loadDiags.Err())
-	}
-
-	providerAddr := tfaddr.MustParseProviderSource("hashicorp/test")
-	constraint, err := providerreqs.ParseVersionConstraints(">1.0.0")
-	if err != nil {
-		t.Fatalf("test setup failed when making constraint: %s", err)
-	}
-	locks := depsfile.NewLocks()
-	locks.SetProvider(
-		providerAddr,
-		versions.MustParseVersion("1.2.3"),
-		constraint,
-		[]providerreqs.Hash{""},
-	)
-
-	// No mock provider is used here - yet
-	// Logic will need to be implemented that lets the init have access to
-	// a factory for the 'old' provider used for PSS previously. This will be
-	// used when migrating away from PSS entirely, or to a new PSS configuration.
-
-	// Get the operations backend
-	_, beDiags := m.Backend(&BackendOpts{
-		Init:          true,
-		BackendConfig: mod.Backend,
-		Locks:         locks,
-	})
-	if !beDiags.HasErrors() {
-		t.Fatal("expected an error to be returned during partial implementation of PSS")
-	}
-	wantErr := "Migration from state store to backend is not implemented yet"
-	if !strings.Contains(beDiags.Err().Error(), wantErr) {
-		t.Fatalf("expected the returned error to contain %q, but got: %s", wantErr, beDiags.Err())
-	}
-}
-
 // Verify that using variables results in an error
 func TestMetaBackend_configureStateStoreVariableUse(t *testing.T) {
 	wantErr := "Variables not allowed"

internal/command/testdata/init-state-store/output.tf (new file, 3 lines, vendored)
@@ -0,0 +1,3 @@
+output "test" {
+  value = "test"
+}

@@ -263,6 +263,10 @@ var MessageRegistry map[InitMessageCode]InitMessage = map[InitMessageCode]InitMe
 		HumanValue: "[reset][green]\n\nSuccessfully unset the state store %q. Terraform will now operate locally.",
 		JSONValue:  "Successfully unset the state store %q. Terraform will now operate locally.",
 	},
+	"state_store_migrate_backend": {
+		HumanValue: "Migrating from %q state store to %q backend.",
+		JSONValue:  "Migrating from %q state store to %q backend.",
+	},
 	"backend_configured_success": {
 		HumanValue: backendConfiguredSuccessHuman,
 		JSONValue:  backendConfiguredSuccessJSON,