Add Golang linter (#2341)
commit a56c3be6e7 (parent 0d9471d236)
49 changed files with 386 additions and 361 deletions
.github/workflows/golangci-lint.yaml (vendored, new file, +26)
@@ -0,0 +1,26 @@
+name: Golang Linter
+
+on:
+  workflow_dispatch:
+  pull_request:
+    branches:
+      - main
+    paths:
+      - '**/*.go'
+      - '**/go.mod'
+
+jobs:
+  golang_linter:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
+      - name: Set up Go
+        uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1
+        with:
+          go-version-file: 'go.mod'
+      - name: golangci-lint
+        uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0
+        with:
+          version: 'v1.55.2'
+          skip-pkg-cache: true

.golangci.yml (new file, +33)
@@ -0,0 +1,33 @@
+run:
+  timeout: 5m
+
+issues:
+  max-per-linter: 0
+  max-same-issues: 0
+  exclude-rules:
+    - path: manifest/provider/resource.go
+      linters:
+        - staticcheck
+      # We need to use ValueFromMsgPack due to some missing abstraction in plugin-go.
+      text: "SA1019: tftypes.ValueFromMsgPack is deprecated: this function is exported for internal use in terraform-plugin-go."
+    - path: manifest/provider/import.go
+      linters:
+        - staticcheck
+      # We need to use MarshalMsgPack due to some missing abstraction in plugin-go.
+      text: "SA1019: impf.MarshalMsgPack is deprecated: this is not meant to be called by third parties."
+
+linters:
+  disable-all: true
+  enable:
+    - gosimple
+    - gofmt
+    - staticcheck
+
+linters-settings:
+  staticcheck:
+    checks:
+      - all
+  gosimple:
+    checks:
+      - all
+      - '-S1040' # Type assertion to current type: https://staticcheck.dev/docs/checks/#S1040

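Note: the config enables only gofmt, gosimple, and staticcheck, suppresses the two SA1019 findings the provider cannot avoid yet (the deprecated msgpack helpers in terraform-plugin-go), and opts out of gosimple's S1040. As a rough illustration of the kind of code S1040 would flag — this snippet is mine, not from the commit:

    package main

    import (
        "io"
        "strings"
    )

    func main() {
        var r io.Reader = strings.NewReader("hello")
        // r is already an io.Reader, so this assertion is a no-op;
        // gosimple reports it as S1040 (type assertion to the same type).
        _ = r.(io.Reader)
    }
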
@@ -101,10 +101,14 @@ tools:
    go install github.com/bflad/tfproviderlint/cmd/tfproviderlint@v0.28.1
    go install github.com/bflad/tfproviderdocs@v0.9.1
    go install github.com/katbyte/terrafmt@v0.5.2
-   go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.50.0
+   go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.55.2
    go install github.com/hashicorp/go-changelog/cmd/changelog-build@latest
    go install github.com/hashicorp/go-changelog/cmd/changelog-entry@latest

+go-lint: tools
+   @echo "==> Run Golang CLI linter..."
+   @golangci-lint run
+
vet:
    @echo "go vet ./..."
    @go vet $$(go list ./...) ; if [ $$? -eq 1 ]; then \

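Note: the tools target now installs the same golangci-lint version the workflow pins (v1.55.2), and the new go-lint target gives contributors a local entry point: running "make go-lint" installs the tools and then runs golangci-lint over the repository, matching what CI does on pull requests.
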
@@ -338,7 +338,7 @@ func Provider() *schema.Provider {
            // admission control
            "kubernetes_validating_webhook_configuration":    resourceKubernetesValidatingWebhookConfigurationV1Beta1(),
            "kubernetes_validating_webhook_configuration_v1": resourceKubernetesValidatingWebhookConfigurationV1(),
-           "kubernetes_mutating_webhook_configuration":      resourceKubernetesMutatingWebhookConfigurationV1(),
+           "kubernetes_mutating_webhook_configuration":      resourceKubernetesMutatingWebhookConfiguration(),
            "kubernetes_mutating_webhook_configuration_v1":   resourceKubernetesMutatingWebhookConfigurationV1(),

            // storage

@@ -464,7 +464,7 @@ func providerConfigure(ctx context.Context, d *schema.ResourceData, terraformVer
    if logging.IsDebugOrHigher() {
        log.Printf("[DEBUG] Enabling HTTP requests/responses tracing")
        cfg.WrapTransport = func(rt http.RoundTripper) http.RoundTripper {
-           return logging.NewTransport("Kubernetes", rt)
+           return logging.NewSubsystemLoggingHTTPTransport("Kubernetes", rt)
        }
    }

@@ -122,7 +122,7 @@ func unsetEnv(t *testing.T) func() {
        "KUBE_TOKEN": e.Token,
    }

-   for k, _ := range envVars {
+   for k := range envVars {
        if err := os.Unsetenv(k); err != nil {
            t.Fatalf("Error unsetting env var %s: %s", k, err)
        }

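Note: ranging over a map with a single variable already yields just the keys, so the blank identifier in "for k, _ := range envVars" was redundant; this is the simplification that gofmt -s and gosimple's S1005 check suggest.
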
@@ -194,7 +194,6 @@ func testAccPreCheck(t *testing.T) {
    if diags.HasError() {
        t.Fatal(diags[0].Summary)
    }
-   return
}

func getClusterVersion() (*gversion.Version, error) {

@@ -220,20 +219,20 @@ func getClusterVersion() (*gversion.Version, error) {
func setClusterVersionVar(t *testing.T, varName string) {
    cv, err := getClusterVersion()
    if err != nil {
-       t.Skip(fmt.Sprint("Could not get cluster version"))
+       t.Skipf("Could not get cluster version")
    }
    os.Setenv(varName, fmt.Sprintf("v%s", cv.Core().Original()))
}

func skipIfClusterVersionLessThan(t *testing.T, vs string) {
    if clusterVersionLessThan(vs) {
-       t.Skip(fmt.Sprintf("This test does not run on cluster versions below %v", vs))
+       t.Skipf("This test does not run on cluster versions below %v", vs)
    }
}

func skipIfClusterVersionGreaterThanOrEqual(t *testing.T, vs string) {
    if clusterVersionGreaterThanOrEqual(vs) {
-       t.Skip(fmt.Sprintf("This test does not run on cluster versions %v and above", vs))
+       t.Skipf("This test does not run on cluster versions %v and above", vs)
    }
}

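Note: the three test-skip fixes fold the formatting into t.Skipf instead of passing fmt.Sprint/fmt.Sprintf output to t.Skip; wrapping a constant string in fmt.Sprint is what gosimple's S1039 check flags, and the Sprintf variants are the same cleanup applied by hand.
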
@@ -6,15 +6,16 @@ package kubernetes
import (
    "context"
    "fmt"
-   "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-   "k8s.io/api/certificates/v1beta1"
-   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-   "k8s.io/client-go/util/retry"
    "log"
    "reflect"
    "time"
+
+   "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+   "k8s.io/api/certificates/v1beta1"
+   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+   kretry "k8s.io/client-go/util/retry"
)

func resourceKubernetesCertificateSigningRequest() *schema.Resource {

@@ -115,7 +116,7 @@ func resourceKubernetesCertificateSigningRequestCreate(ctx context.Context, d *s
    defer conn.CertificatesV1beta1().CertificateSigningRequests().Delete(ctx, csrName, metav1.DeleteOptions{})

    if d.Get("auto_approve").(bool) {
-       retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
+       retryErr := kretry.RetryOnConflict(kretry.DefaultRetry, func() error {
            pendingCSR, getErr := conn.CertificatesV1beta1().CertificateSigningRequests().Get(ctx, csrName, metav1.GetOptions{})
            if getErr != nil {
                return getErr

@@ -137,7 +138,7 @@ func resourceKubernetesCertificateSigningRequestCreate(ctx context.Context, d *s
    }

    log.Printf("[DEBUG] Waiting for certificate to be issued")
-   stateConf := &resource.StateChangeConf{
+   stateConf := &retry.StateChangeConf{
        Target:  []string{"Issued"},
        Pending: []string{"", "Approved"},
        Timeout: d.Timeout(schema.TimeoutCreate),

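Note: the bulk of this commit swaps the SDK's deprecated wait/retry helpers in helper/resource (RetryContext, RetryError, RetryFunc, StateChangeConf, ...) for their successors in helper/retry; staticcheck reports the old names as SA1019. In the two certificate-signing-request files the SDK package would collide with k8s.io/client-go/util/retry, hence the kretry alias. A minimal side-by-side sketch (mine, not from the commit):

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"

        "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
        kretry "k8s.io/client-go/util/retry"
    )

    func main() {
        // client-go's conflict-retry helper, reachable under the kretry alias.
        err := kretry.RetryOnConflict(kretry.DefaultRetry, func() error {
            return nil // e.g. re-read and update an object after a 409 Conflict
        })
        fmt.Println("kretry:", err)

        // The SDK's helper, replacing the deprecated resource.RetryContext.
        err = retry.RetryContext(context.Background(), time.Minute, func() *retry.RetryError {
            done := true // poll some condition here
            if !done {
                return retry.RetryableError(errors.New("still waiting"))
            }
            return nil
        })
        fmt.Println("retry:", err)
    }
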
@@ -5,18 +5,18 @@ package kubernetes

import (
    "context"
-   "fmt"
+   "errors"
    "log"
    "time"

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

    certificates "k8s.io/api/certificates/v1"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-   "k8s.io/client-go/util/retry"
+   kretry "k8s.io/client-go/util/retry"
)

func resourceKubernetesCertificateSigningRequestV1() *schema.Resource {

@@ -115,7 +115,7 @@ func resourceKubernetesCertificateSigningRequestV1Create(ctx context.Context, d
    defer conn.CertificatesV1().CertificateSigningRequests().Delete(ctx, csrName, metav1.DeleteOptions{})

    if d.Get("auto_approve").(bool) {
-       retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
+       retryErr := kretry.RetryOnConflict(kretry.DefaultRetry, func() error {
            pendingCSR, getErr := conn.CertificatesV1().CertificateSigningRequests().Get(
                ctx, csrName, metav1.GetOptions{})
            if getErr != nil {

@@ -140,11 +140,11 @@ func resourceKubernetesCertificateSigningRequestV1Create(ctx context.Context, d
    }

    log.Printf("[DEBUG] Waiting for certificate to be issued")
-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *resource.RetryError {
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError {
        out, err := conn.CertificatesV1().CertificateSigningRequests().Get(ctx, csrName, metav1.GetOptions{})
        if err != nil {
            log.Printf("[ERROR] Received error: %v", err)
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        // Check to see if a certificate has been issued, and update status accordingly,

@@ -159,8 +159,7 @@ func resourceKubernetesCertificateSigningRequestV1Create(ctx context.Context, d
            }
        }
        log.Printf("[DEBUG] CertificateSigningRequest %s status received: %#v", csrName, out.Status)
-       return resource.RetryableError(fmt.Errorf(
-           "Waiting for certificate to be issued"))
+       return retry.RetryableError(errors.New("Waiting for certificate to be issued"))
    })
    if err != nil {
        return diag.FromErr(err)

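Note: since the only remaining use of fmt in this file goes away, the constant-message error is built with errors.New rather than fmt.Errorf — the two are equivalent when the message contains no format verbs — which is why the import hunk above swaps "fmt" for "errors".
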
@@ -11,7 +11,7 @@ import (

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"

-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    "k8s.io/apimachinery/pkg/api/errors"

@@ -179,17 +179,17 @@ func resourceKubernetesCronJobV1Delete(ctx context.Context, d *schema.ResourceDa
        return diag.FromErr(err)
    }

-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError {
        _, err := conn.BatchV1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            if statusErr, ok := err.(*errors.StatusError); ok && errors.IsNotFound(statusErr) {
                return nil
            }
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        e := fmt.Errorf("Cron Job %s still exists", name)
-       return resource.RetryableError(e)
+       return retry.RetryableError(e)
    })
    if err != nil {
        return diag.FromErr(err)

@@ -11,7 +11,7 @@ import (

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"

-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    "k8s.io/api/batch/v1beta1"
    "k8s.io/apimachinery/pkg/api/errors"

@@ -192,17 +192,17 @@ func resourceKubernetesCronJobV1Beta1Delete(ctx context.Context, d *schema.Resou
        return diag.FromErr(err)
    }

-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError {
        _, err := conn.BatchV1beta1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            if statusErr, ok := err.(*errors.StatusError); ok && errors.IsNotFound(statusErr) {
                return nil
            }
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        e := fmt.Errorf("Cron Job %s still exists", name)
-       return resource.RetryableError(e)
+       return retry.RetryableError(e)
    })
    if err != nil {
        return diag.FromErr(err)

@@ -10,7 +10,7 @@ import (

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"

-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"

@@ -35,7 +35,7 @@ func resourceKubernetesCSIDriverV1() *schema.Resource {
            "metadata": metadataSchema("csi driver", true),
            "spec": {
                Type:        schema.TypeList,
-               Description: fmt.Sprintf("Spec of the CSIDriver"),
+               Description: "Spec of the CSIDriver",
                Optional:    true,
                MaxItems:    1,
                Elem: &schema.Resource{

@@ -174,17 +174,17 @@ func resourceKubernetesCSIDriverV1Delete(ctx context.Context, d *schema.Resource
        return diag.FromErr(err)
    }

-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError {
        _, err := conn.StorageV1().CSIDrivers().Get(ctx, d.Id(), metav1.GetOptions{})
        if err != nil {
            if statusErr, ok := err.(*errors.StatusError); ok && errors.IsNotFound(statusErr) {
                return nil
            }
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        e := fmt.Errorf("CSIDriver (%s) still exists", d.Id())
-       return resource.RetryableError(e)
+       return retry.RetryableError(e)
    })
    if err != nil {
        return diag.FromErr(err)

@@ -10,7 +10,7 @@ import (

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"

-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
    storage "k8s.io/api/storage/v1beta1"

@@ -32,7 +32,7 @@ func resourceKubernetesCSIDriverV1Beta1() *schema.Resource {
            "metadata": metadataSchema("csi driver", true),
            "spec": {
                Type:        schema.TypeList,
-               Description: fmt.Sprint("Spec of the CSIDriver"),
+               Description: "Spec of the CSIDriver",
                Optional:    true,
                MaxItems:    1,
                Elem: &schema.Resource{

@@ -171,17 +171,17 @@ func resourceKubernetesCSIDriverV1Beta1Delete(ctx context.Context, d *schema.Res
        return diag.FromErr(err)
    }

-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError {
        _, err := conn.StorageV1beta1().CSIDrivers().Get(ctx, d.Id(), metav1.GetOptions{})
        if err != nil {
            if statusErr, ok := err.(*errors.StatusError); ok && errors.IsNotFound(statusErr) {
                return nil
            }
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        e := fmt.Errorf("CSIDriver (%s) still exists", d.Id())
-       return resource.RetryableError(e)
+       return retry.RetryableError(e)
    })
    if err != nil {
        return diag.FromErr(err)

@@ -11,7 +11,7 @@ import (
    "time"

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"

@@ -162,7 +162,7 @@ func resourceKubernetesDaemonSetV1Create(ctx context.Context, d *schema.Resource
    }

    if d.Get("wait_for_rollout").(bool) {
-       err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutCreate),
+       err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate),
            waitForDaemonSetReplicasFunc(ctx, conn, metadata.Namespace, metadata.Name))
        if err != nil {
            return diag.FromErr(err)

@@ -213,7 +213,7 @@ func resourceKubernetesDaemonSetV1Update(ctx context.Context, d *schema.Resource
    log.Printf("[INFO] Submitted updated daemonset: %#v", out)

    if d.Get("wait_for_rollout").(bool) {
-       err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate),
+       err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate),
            waitForDaemonSetReplicasFunc(ctx, conn, namespace, name))
        if err != nil {
            return diag.FromErr(err)

@@ -320,11 +320,11 @@ func resourceKubernetesDaemonSetV1Exists(ctx context.Context, d *schema.Resource
    return true, err
}

-func waitForDaemonSetReplicasFunc(ctx context.Context, conn *kubernetes.Clientset, ns, name string) resource.RetryFunc {
-   return func() *resource.RetryError {
+func waitForDaemonSetReplicasFunc(ctx context.Context, conn *kubernetes.Clientset, ns, name string) retry.RetryFunc {
+   return func() *retry.RetryError {
        daemonSet, err := conn.AppsV1().DaemonSets(ns).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        desiredReplicas := daemonSet.Status.DesiredNumberScheduled

@@ -335,7 +335,7 @@ func waitForDaemonSetReplicasFunc(ctx context.Context, conn *kubernetes.Clientse
            return nil
        }

-       return resource.RetryableError(fmt.Errorf("Waiting for %d replicas of %q to be scheduled (%d)",
+       return retry.RetryableError(fmt.Errorf("Waiting for %d replicas of %q to be scheduled (%d)",
            desiredReplicas, daemonSet.GetName(), daemonSet.Status.CurrentNumberScheduled))
    }
}

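Note: the waitFor...Func helpers in these files change their return type from resource.RetryFunc to retry.RetryFunc. The general shape of such a poller, as a sketch with a hypothetical waitUntilReady (not part of the provider):

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"

        "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    )

    // waitUntilReady mirrors the diff's waitFor...Func pattern: transient
    // conditions become RetryableError, terminal failures NonRetryableError.
    func waitUntilReady(ctx context.Context, check func(context.Context) (bool, error)) retry.RetryFunc {
        return func() *retry.RetryError {
            ready, err := check(ctx)
            if err != nil {
                return retry.NonRetryableError(err)
            }
            if !ready {
                return retry.RetryableError(errors.New("still waiting"))
            }
            return nil
        }
    }

    func main() {
        ctx := context.Background()
        check := func(context.Context) (bool, error) { return true, nil }
        err := retry.RetryContext(ctx, time.Minute, waitUntilReady(ctx, check))
        fmt.Println(err)
    }
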
|
|
@ -8,7 +8,7 @@ import (
|
|||
"log"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
|
@@ -44,15 +44,15 @@ func resourceKubernetesDefaultServiceAccountV1Create(ctx context.Context, d *sch
    svcAcc := corev1.ServiceAccount{ObjectMeta: metadata}

    log.Printf("[INFO] Checking for default service account existence: %s", metadata.Namespace)
-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *resource.RetryError {
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError {
        _, err := conn.CoreV1().ServiceAccounts(metadata.Namespace).Get(ctx, metadata.Name, metav1.GetOptions{})
        if err != nil {
            if errors.IsNotFound(err) {
                log.Printf("[INFO] Default service account does not exist, will retry: %s", metadata.Namespace)
-               return resource.RetryableError(err)
+               return retry.RetryableError(err)
            }

-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        log.Printf("[INFO] Default service account exists: %s", metadata.Namespace)

@@ -11,7 +11,7 @@ import (
    "time"

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"

@@ -245,7 +245,7 @@ func resourceKubernetesDeploymentV1Create(ctx context.Context, d *schema.Resourc

    if d.Get("wait_for_rollout").(bool) {
        log.Printf("[INFO] Waiting for deployment %s/%s to rollout", out.ObjectMeta.Namespace, out.ObjectMeta.Name)
-       err := resource.RetryContext(ctx, d.Timeout(schema.TimeoutCreate),
+       err := retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate),
            waitForDeploymentReplicasFunc(ctx, conn, out.GetNamespace(), out.GetName()))
        if err != nil {
            return diag.FromErr(err)

@@ -305,7 +305,7 @@ func resourceKubernetesDeploymentV1Update(ctx context.Context, d *schema.Resourc

    if d.Get("wait_for_rollout").(bool) {
        log.Printf("[INFO] Waiting for deployment %s/%s to rollout", out.ObjectMeta.Namespace, out.ObjectMeta.Name)
-       err := resource.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate),
+       err := retry.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate),
            waitForDeploymentReplicasFunc(ctx, conn, out.GetNamespace(), out.GetName()))
        if err != nil {
            return diag.FromErr(err)

@@ -381,17 +381,17 @@ func resourceKubernetesDeploymentV1Delete(ctx context.Context, d *schema.Resourc
        return diag.FromErr(err)
    }

-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError {
        _, err := conn.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            if statusErr, ok := err.(*errors.StatusError); ok && errors.IsNotFound(statusErr) {
                return nil
            }
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        e := fmt.Errorf("Deployment (%s) still exists", d.Id())
-       return resource.RetryableError(e)
+       return retry.RetryableError(e)
    })
    if err != nil {
        return diag.FromErr(err)

@@ -437,12 +437,12 @@ func GetDeploymentCondition(status appsv1.DeploymentStatus, condType appsv1.Depl
    return nil
}

-func waitForDeploymentReplicasFunc(ctx context.Context, conn *kubernetes.Clientset, ns, name string) resource.RetryFunc {
-   return func() *resource.RetryError {
+func waitForDeploymentReplicasFunc(ctx context.Context, conn *kubernetes.Clientset, ns, name string) retry.RetryFunc {
+   return func() *retry.RetryError {
        // Query the deployment to get a status update.
        dply, err := conn.AppsV1().Deployments(ns).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        var specReplicas int32 = 1 // default, according to API docs

@@ -451,33 +451,33 @@ func waitForDeploymentReplicasFunc(ctx context.Context, conn *kubernetes.Clients
        }

        if dply.Generation > dply.Status.ObservedGeneration {
-           return resource.RetryableError(fmt.Errorf("Waiting for rollout to start"))
+           return retry.RetryableError(fmt.Errorf("Waiting for rollout to start"))
        }

        if dply.Generation == dply.Status.ObservedGeneration {
            cond := GetDeploymentCondition(dply.Status, appsv1.DeploymentProgressing)
            if cond != nil && cond.Reason == TimedOutReason {
-               return resource.NonRetryableError(fmt.Errorf("Deployment exceeded its progress deadline"))
+               return retry.NonRetryableError(fmt.Errorf("Deployment exceeded its progress deadline"))
            }

            if dply.Status.UpdatedReplicas < specReplicas {
-               return resource.RetryableError(fmt.Errorf("Waiting for rollout to finish: %d out of %d new replicas have been updated...", dply.Status.UpdatedReplicas, specReplicas))
+               return retry.RetryableError(fmt.Errorf("Waiting for rollout to finish: %d out of %d new replicas have been updated...", dply.Status.UpdatedReplicas, specReplicas))
            }

            if dply.Status.Replicas > dply.Status.UpdatedReplicas {
-               return resource.RetryableError(fmt.Errorf("Waiting for rollout to finish: %d old replicas are pending termination...", dply.Status.Replicas-dply.Status.UpdatedReplicas))
+               return retry.RetryableError(fmt.Errorf("Waiting for rollout to finish: %d old replicas are pending termination...", dply.Status.Replicas-dply.Status.UpdatedReplicas))
            }

            if dply.Status.Replicas > dply.Status.ReadyReplicas {
-               return resource.RetryableError(fmt.Errorf("Waiting for rollout to finish: %d replicas wanted; %d replicas Ready", dply.Status.Replicas, dply.Status.ReadyReplicas))
+               return retry.RetryableError(fmt.Errorf("Waiting for rollout to finish: %d replicas wanted; %d replicas Ready", dply.Status.Replicas, dply.Status.ReadyReplicas))
            }

            if dply.Status.AvailableReplicas < dply.Status.UpdatedReplicas {
-               return resource.RetryableError(fmt.Errorf("Waiting for rollout to finish: %d of %d updated replicas are available...", dply.Status.AvailableReplicas, dply.Status.UpdatedReplicas))
+               return retry.RetryableError(fmt.Errorf("Waiting for rollout to finish: %d of %d updated replicas are available...", dply.Status.AvailableReplicas, dply.Status.UpdatedReplicas))
            }
            return nil
        }

-       return resource.NonRetryableError(fmt.Errorf("Observed generation %d is not expected to be greater than generation %d", dply.Status.ObservedGeneration, dply.Generation))
+       return retry.NonRetryableError(fmt.Errorf("Observed generation %d is not expected to be greater than generation %d", dply.Status.ObservedGeneration, dply.Generation))
    }
}

|
|
@ -9,7 +9,7 @@ import (
|
|||
"log"
|
||||
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
|
@@ -193,17 +193,17 @@ func resourceKubernetesIngressClassV1Delete(ctx context.Context, d *schema.Resou
        return diag.Errorf("Failed to delete Ingress Class %s because: %s", d.Id(), err)
    }

-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError {
        _, err := conn.NetworkingV1().IngressClasses().Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            if statusErr, ok := err.(*errors.StatusError); ok && errors.IsNotFound(statusErr) {
                return nil
            }
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        e := fmt.Errorf("Ingress Class (%s) still exists", d.Id())
-       return resource.RetryableError(e)
+       return retry.RetryableError(e)
    })
    if err != nil {
        return diag.FromErr(err)

@@ -12,7 +12,7 @@ import (
    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
    networking "k8s.io/api/networking/v1"

-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
    "k8s.io/apimachinery/pkg/api/errors"

@@ -197,28 +197,28 @@ func resourceKubernetesIngressV1Create(ctx context.Context, d *schema.ResourceDa
    }

    log.Printf("[INFO] Waiting for load balancer to become ready: %#v", out)
-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *resource.RetryError {
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError {
        res, err := conn.NetworkingV1().Ingresses(metadata.Namespace).Get(ctx, metadata.Name, metav1.GetOptions{})
        if err != nil {
            // NOTE it is possible in some HA apiserver setups that are eventually consistent
            // that we could get a 404 when doing a Get immediately after a Create
            if errors.IsNotFound(err) {
-               return resource.RetryableError(err)
+               return retry.RetryableError(err)
            }
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        if len(res.Status.LoadBalancer.Ingress) > 0 {
            diagnostics := resourceKubernetesIngressV1Read(ctx, d, meta)
            if diagnostics.HasError() {
                errmsg := diagnostics[0].Summary
-               return resource.NonRetryableError(fmt.Errorf("Error reading ingress: %v", errmsg))
+               return retry.NonRetryableError(fmt.Errorf("Error reading ingress: %v", errmsg))
            }
            return nil
        }

        log.Printf("[INFO] Load Balancer not ready yet...")
-       return resource.RetryableError(fmt.Errorf("Load Balancer is not ready yet"))
+       return retry.RetryableError(fmt.Errorf("Load Balancer is not ready yet"))
    })
    if err != nil {
        return diag.FromErr(err)

@@ -325,17 +325,17 @@ func resourceKubernetesIngressV1Delete(ctx context.Context, d *schema.ResourceDa
        return diag.Errorf("Failed to delete Ingress %s because: %s", d.Id(), err)
    }

-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError {
        _, err := conn.NetworkingV1().Ingresses(namespace).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            if statusErr, ok := err.(*errors.StatusError); ok && errors.IsNotFound(statusErr) {
                return nil
            }
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        e := fmt.Errorf("Ingress (%s) still exists", d.Id())
-       return resource.RetryableError(e)
+       return retry.RetryableError(e)
    })
    if err != nil {
        return diag.FromErr(err)

@@ -11,7 +11,7 @@ import (
    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
    networking "k8s.io/api/networking/v1beta1"

-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    "k8s.io/api/extensions/v1beta1"
    "k8s.io/apimachinery/pkg/api/errors"

@@ -189,28 +189,28 @@ func resourceKubernetesIngressV1Beta1Create(ctx context.Context, d *schema.Resou
    }

    log.Printf("[INFO] Waiting for load balancer to become ready: %#v", out)
-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *resource.RetryError {
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError {
        res, err := conn.ExtensionsV1beta1().Ingresses(metadata.Namespace).Get(ctx, metadata.Name, metav1.GetOptions{})
        if err != nil {
            // NOTE it is possible in some HA apiserver setups that are eventually consistent
            // that we could get a 404 when doing a Get immediately after a Create
            if errors.IsNotFound(err) {
-               return resource.RetryableError(err)
+               return retry.RetryableError(err)
            }
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        if len(res.Status.LoadBalancer.Ingress) > 0 {
            diagnostics := resourceKubernetesIngressV1Beta1Read(ctx, d, meta)
            if diagnostics.HasError() {
                errmsg := diagnostics[0].Summary
-               return resource.NonRetryableError(fmt.Errorf("Error reading ingress: %v", errmsg))
+               return retry.NonRetryableError(fmt.Errorf("Error reading ingress: %v", errmsg))
            }
            return nil
        }

        log.Printf("[INFO] Load Balancer not ready yet...")
-       return resource.RetryableError(fmt.Errorf("Load Balancer is not ready yet"))
+       return retry.RetryableError(fmt.Errorf("Load Balancer is not ready yet"))
    })
    if err != nil {
        return diag.FromErr(err)

@@ -317,17 +317,17 @@ func resourceKubernetesIngressV1Beta1Delete(ctx context.Context, d *schema.Resou
        return diag.Errorf("Failed to delete Ingress %s because: %s", d.Id(), err)
    }

-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError {
        _, err := conn.ExtensionsV1beta1().Ingresses(namespace).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            if statusErr, ok := err.(*errors.StatusError); ok && errors.IsNotFound(statusErr) {
                return nil
            }
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        e := fmt.Errorf("Ingress (%s) still exists", d.Id())
-       return resource.RetryableError(e)
+       return retry.RetryableError(e)
    })
    if err != nil {
        return diag.FromErr(err)

@@ -10,7 +10,7 @@ import (
    "time"

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

    batchv1 "k8s.io/api/batch/v1"

@@ -100,7 +100,7 @@ func resourceKubernetesJobV1Create(ctx context.Context, d *schema.ResourceData,
        return diag.FromErr(err)
    }
    if d.Get("wait_for_completion").(bool) {
-       err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutCreate),
+       err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate),
            retryUntilJobV1IsFinished(ctx, conn, namespace, name))
        if err != nil {
            return diag.FromErr(err)

@@ -198,7 +198,7 @@ func resourceKubernetesJobV1Update(ctx context.Context, d *schema.ResourceData,
    d.SetId(buildId(out.ObjectMeta))

    if d.Get("wait_for_completion").(bool) {
-       err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate),
+       err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate),
            retryUntilJobV1IsFinished(ctx, conn, namespace, name))
        if err != nil {
            return diag.FromErr(err)

@@ -227,17 +227,17 @@ func resourceKubernetesJobV1Delete(ctx context.Context, d *schema.ResourceData,
        return diag.Errorf("Failed to delete Job! API error: %s", err)
    }

-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError {
        _, err := conn.BatchV1().Jobs(namespace).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            if statusErr, ok := err.(*errors.StatusError); ok && errors.IsNotFound(statusErr) {
                return nil
            }
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        e := fmt.Errorf("Job %s still exists", name)
-       return resource.RetryableError(e)
+       return retry.RetryableError(e)
    })
    if err != nil {
        return diag.FromErr(err)

@@ -272,14 +272,14 @@ func resourceKubernetesJobV1Exists(ctx context.Context, d *schema.ResourceData,
}

// retryUntilJobV1IsFinished checks if a given job has finished its execution in either a Complete or Failed state
-func retryUntilJobV1IsFinished(ctx context.Context, conn *kubernetes.Clientset, ns, name string) resource.RetryFunc {
-   return func() *resource.RetryError {
+func retryUntilJobV1IsFinished(ctx context.Context, conn *kubernetes.Clientset, ns, name string) retry.RetryFunc {
+   return func() *retry.RetryError {
        job, err := conn.BatchV1().Jobs(ns).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            if statusErr, ok := err.(*errors.StatusError); ok && errors.IsNotFound(statusErr) {
                return nil
            }
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        for _, c := range job.Status.Conditions {

@@ -289,11 +289,11 @@ func retryUntilJobV1IsFinished(ctx context.Context, conn *kubernetes.Clientset,
                case batchv1.JobComplete:
                    return nil
                case batchv1.JobFailed:
-                   return resource.NonRetryableError(fmt.Errorf("job: %s/%s is in failed state", ns, name))
+                   return retry.NonRetryableError(fmt.Errorf("job: %s/%s is in failed state", ns, name))
                }
            }
        }

-       return resource.RetryableError(fmt.Errorf("job: %s/%s is not in complete state", ns, name))
+       return retry.RetryableError(fmt.Errorf("job: %s/%s is not in complete state", ns, name))
    }
}

@@ -11,7 +11,7 @@ import (

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"

-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"

@@ -64,15 +64,15 @@ func resourceKubernetesNamespaceV1Create(ctx context.Context, d *schema.Resource

    if d.Get("wait_for_default_service_account").(bool) {
        log.Printf("[DEBUG] Waiting for default service account to be created")
-       err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *resource.RetryError {
+       err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError {
            _, err := conn.CoreV1().ServiceAccounts(out.Name).Get(ctx, "default", metav1.GetOptions{})
            if err != nil {
                if errors.IsNotFound(err) {
                    log.Printf("[INFO] Default service account does not exist, will retry: %s", metadata.Namespace)
-                   return resource.RetryableError(err)
+                   return retry.RetryableError(err)
                }

-               return resource.NonRetryableError(err)
+               return retry.NonRetryableError(err)
            }

            log.Printf("[INFO] Default service account exists: %s", metadata.Namespace)

@@ -154,7 +154,7 @@ func resourceKubernetesNamespaceV1Delete(ctx context.Context, d *schema.Resource
        return diag.FromErr(err)
    }

-   stateConf := &resource.StateChangeConf{
+   stateConf := &retry.StateChangeConf{
        Target:  []string{},
        Pending: []string{"Terminating"},
        Timeout: d.Timeout(schema.TimeoutDelete),

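Note: StateChangeConf and its Pending/Target/Timeout fields move over unchanged; in recent SDK releases the helper/resource wait types appear to be kept only as deprecated aliases of the helper/retry ones, so these hunks are renames rather than behavior changes.
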
@@ -11,7 +11,7 @@ import (

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"

-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    api "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"

@@ -113,7 +113,7 @@ func resourceKubernetesPersistentVolumeClaimV1Create(ctx context.Context, d *sch
    name := out.ObjectMeta.Name

    if d.Get("wait_until_bound").(bool) {
-       stateConf := &resource.StateChangeConf{
+       stateConf := &retry.StateChangeConf{
            Target:  []string{"Bound"},
            Pending: []string{"Pending"},
            Timeout: d.Timeout(schema.TimeoutCreate),

@@ -253,18 +253,18 @@ func resourceKubernetesPersistentVolumeClaimV1Delete(ctx context.Context, d *sch
        return diag.FromErr(err)
    }

-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError {
        out, err := conn.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            if errors.IsNotFound(err) {
                return nil
            }
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        log.Printf("[DEBUG] Current state of persistent volume claim finalizers: %#v", out.Finalizers)
        e := fmt.Errorf("Persistent volume claim %s still exists with finalizers: %v", name, out.Finalizers)
-       return resource.RetryableError(e)
+       return retry.RetryableError(e)
    })
    if err != nil {
        return diag.FromErr(err)

@@ -12,6 +12,7 @@ import (

    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    "github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
    corev1 "k8s.io/api/core/v1"
    storageapi "k8s.io/api/storage/v1"

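Note: the acceptance-test file keeps its helper/resource import alongside the new helper/retry one — the test harness (resource.Test and friends) still lives in helper/resource; only the retry/wait helpers were deprecated there.
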
@@ -579,15 +580,15 @@ func testAccCheckKubernetesPersistentVolumeClaimV1Destroy(s *terraform.State) er
    }

    var resp *corev1.PersistentVolumeClaim
-   err = resource.RetryContext(ctx, 3*time.Minute, func() *resource.RetryError {
+   err = retry.RetryContext(ctx, 3*time.Minute, func() *retry.RetryError {
        resp, err = conn.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, metav1.GetOptions{})
        if errors.IsNotFound(err) {
            return nil
        }
        if err == nil && resp != nil {
-           return resource.RetryableError(err)
+           return retry.RetryableError(err)
        }
-       return resource.NonRetryableError(err)
+       return retry.NonRetryableError(err)
    })

    if err != nil {

@@ -13,7 +13,7 @@ import (

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"

-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
    api "k8s.io/api/core/v1"

@@ -228,7 +228,7 @@ func resourceKubernetesPersistentVolumeV1Create(ctx context.Context, d *schema.R
    }
    log.Printf("[INFO] Submitted new persistent volume: %#v", out)

-   stateConf := &resource.StateChangeConf{
+   stateConf := &retry.StateChangeConf{
        Target:  []string{"Available", "Bound"},
        Pending: []string{"Pending"},
        Timeout: d.Timeout(schema.TimeoutCreate),

@@ -335,18 +335,18 @@ func resourceKubernetesPersistentVolumeV1Delete(ctx context.Context, d *schema.R
        return diag.FromErr(err)
    }

-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError {
        out, err := conn.CoreV1().PersistentVolumes().Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            if k8serrors.IsNotFound(err) {
                return nil
            }
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        log.Printf("[DEBUG] Current state of persistent volume: %#v", out.Status.Phase)
        e := fmt.Errorf("Persistent volume %s still exists (%s)", name, out.Status.Phase)
-       return resource.RetryableError(e)
+       return retry.RetryableError(e)
    })
    if err != nil {
        return diag.FromErr(err)

@@ -11,7 +11,7 @@ import (

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"

-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
    corev1 "k8s.io/api/core/v1"

@@ -103,7 +103,7 @@ func resourceKubernetesPodV1Create(ctx context.Context, d *schema.ResourceData,

    d.SetId(buildId(out.ObjectMeta))

-   stateConf := &resource.StateChangeConf{
+   stateConf := &retry.StateChangeConf{
        Target:  expandPodTargetState(d.Get("target_state").([]interface{})),
        Pending: []string{string(corev1.PodPending)},
        Timeout: d.Timeout(schema.TimeoutCreate),

@@ -233,18 +233,18 @@ func resourceKubernetesPodV1Delete(ctx context.Context, d *schema.ResourceData,
        return diag.FromErr(err)
    }

-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError {
        out, err := conn.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            if statusErr, ok := err.(*errors.StatusError); ok && errors.IsNotFound(statusErr) {
                return nil
            }
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        log.Printf("[DEBUG] Current state of pod: %#v", out.Status.Phase)
        e := fmt.Errorf("Pod %s still exists (%s)", name, out.Status.Phase)
-       return resource.RetryableError(e)
+       return retry.RetryableError(e)
    })
    if err != nil {
        return diag.FromErr(err)

@@ -11,7 +11,7 @@ import (

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"

-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    api "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"

@@ -139,7 +139,7 @@ func resourceKubernetesReplicationControllerV1Create(ctx context.Context, d *sch
    log.Printf("[DEBUG] Waiting for replication controller %s to schedule %d replicas",
        d.Id(), *out.Spec.Replicas)
    // 10 mins should be sufficient for scheduling ~10k replicas
-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutCreate),
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate),
        waitForDesiredReplicasFunc(ctx, conn, out.GetNamespace(), out.GetName()))
    if err != nil {
        return diag.FromErr(err)

@@ -233,7 +233,7 @@ func resourceKubernetesReplicationControllerV1Update(ctx context.Context, d *sch
    }
    log.Printf("[INFO] Submitted updated replication controller: %#v", out)

-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate),
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate),
        waitForDesiredReplicasFunc(ctx, conn, namespace, name))
    if err != nil {
        return diag.FromErr(err)

@@ -271,7 +271,7 @@ func resourceKubernetesReplicationControllerV1Delete(ctx context.Context, d *sch
    }

    // Wait until all replicas are gone
-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete),
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete),
        waitForDesiredReplicasFunc(ctx, conn, namespace, name))
    if err != nil {
        return diag.FromErr(err)

@@ -286,17 +286,17 @@ func resourceKubernetesReplicationControllerV1Delete(ctx context.Context, d *sch
    }

    // Wait for Delete to finish. Necessary for ForceNew operations.
-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError {
        _, err := conn.CoreV1().ReplicationControllers(namespace).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            if statusErr, ok := err.(*errors.StatusError); ok && errors.IsNotFound(statusErr) {
                return nil
            }
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        e := fmt.Errorf("Replication Controller (%s) still exists", d.Id())
-       return resource.RetryableError(e)
+       return retry.RetryableError(e)
    })
    if err != nil {
        return diag.FromErr(err)

@@ -329,11 +329,11 @@ func resourceKubernetesReplicationControllerV1Exists(ctx context.Context, d *sch
    return true, err
}

-func waitForDesiredReplicasFunc(ctx context.Context, conn *kubernetes.Clientset, ns, name string) resource.RetryFunc {
-   return func() *resource.RetryError {
+func waitForDesiredReplicasFunc(ctx context.Context, conn *kubernetes.Clientset, ns, name string) retry.RetryFunc {
+   return func() *retry.RetryError {
        rc, err := conn.CoreV1().ReplicationControllers(ns).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        desiredReplicas := *rc.Spec.Replicas

@@ -344,7 +344,7 @@ func waitForDesiredReplicasFunc(ctx context.Context, conn *kubernetes.Clientset,
            return nil
        }

-       return resource.RetryableError(fmt.Errorf("Waiting for %d replicas of %q to be scheduled (%d)",
+       return retry.RetryableError(fmt.Errorf("Waiting for %d replicas of %q to be scheduled (%d)",
            desiredReplicas, rc.GetName(), rc.Status.FullyLabeledReplicas))
    }
}

@@ -11,7 +11,7 @@ import (

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"

-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
    api "k8s.io/api/core/v1"

@@ -130,17 +130,17 @@ func resourceKubernetesResourceQuotaV1Create(ctx context.Context, d *schema.Reso
    log.Printf("[INFO] Submitted new resource quota: %#v", out)
    d.SetId(buildId(out.ObjectMeta))

-   err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *resource.RetryError {
+   err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError {
        quota, err := conn.CoreV1().ResourceQuotas(out.Namespace).Get(ctx, out.Name, metav1.GetOptions{})
        if err != nil {
            return resource.NonRetryableError(err)
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }
        if resourceListEquals(spec.Hard, quota.Status.Hard) {
            return nil
        }
        err = fmt.Errorf("Quotas don't match after creation.\nExpected: %#v\nGiven: %#v",
            spec.Hard, quota.Status.Hard)
-       return resource.RetryableError(err)
+       return retry.RetryableError(err)
    })
    if err != nil {
        return diag.FromErr(err)

@@ -234,17 +234,17 @@ func resourceKubernetesResourceQuotaV1Update(ctx context.Context, d *schema.Reso
    d.SetId(buildId(out.ObjectMeta))

    if waitForChangedSpec {
-       err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate), func() *resource.RetryError {
+       err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate), func() *retry.RetryError {
            quota, err := conn.CoreV1().ResourceQuotas(namespace).Get(ctx, name, metav1.GetOptions{})
            if err != nil {
-               return resource.NonRetryableError(err)
+               return retry.NonRetryableError(err)
            }
            if resourceListEquals(spec.Hard, quota.Status.Hard) {
                return nil
            }
            err = fmt.Errorf("Quotas don't match after update.\nExpected: %#v\nGiven: %#v",
                spec.Hard, quota.Status.Hard)
-           return resource.RetryableError(err)
+           return retry.RetryableError(err)
        })
        if err != nil {
            return diag.FromErr(err)

@@ -11,7 +11,7 @@ import (

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"

-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"

@@ -124,7 +124,7 @@ func resourceKubernetesSecretV1Create(ctx context.Context, d *schema.ResourceDat
        secret.Type = corev1.SecretType(v.(string))
    }

-   if v, ok := d.GetOkExists("immutable"); ok {
+   if v, ok := d.GetOk("immutable"); ok {
        secret.Immutable = ptrToBool(v.(bool))
    }

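Note: GetOkExists is deprecated, which staticcheck flags as SA1019. GetOk differs slightly — it also treats the zero value as unset, so an explicit immutable = false now reads as absent — but an absent immutable defaults to false on the Kubernetes side anyway, so the outcome should be the same.
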
@@ -140,11 +140,11 @@ func resourceKubernetesSecretV1Create(ctx context.Context, d *schema.ResourceDat
    if out.Type == corev1.SecretTypeServiceAccountToken && d.Get("wait_for_service_account_token").(bool) {
        log.Printf("[DEBUG] Waiting for secret service account token to be created")

-       err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *resource.RetryError {
+       err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError {
            secret, err := conn.CoreV1().Secrets(out.Namespace).Get(ctx, out.Name, metav1.GetOptions{})
            if err != nil {
                log.Printf("[DEBUG] Received error: %#v", err)
-               return resource.NonRetryableError(err)
+               return retry.NonRetryableError(err)
            }

            log.Printf("[INFO] Received secret: %#v", secret.Name)

@@ -153,7 +153,7 @@ func resourceKubernetesSecretV1Create(ctx context.Context, d *schema.ResourceDat
            return nil
        }

-       return resource.RetryableError(fmt.Errorf(
+       return retry.RetryableError(fmt.Errorf(
            "Waiting for secret %q to create service account token", d.Id()))
    })
    if err != nil {

@@ -11,7 +11,7 @@ import (
    "time"

    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
-   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+   "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

    corev1 "k8s.io/api/core/v1"

@@ -124,15 +124,15 @@ func getServiceAccountDefaultSecretV1(ctx context.Context, name string, config c
    }

    var svcAccTokens []corev1.Secret
-   err = resource.RetryContext(ctx, timeout, func() *resource.RetryError {
+   err = retry.RetryContext(ctx, timeout, func() *retry.RetryError {
        resp, err := conn.CoreV1().ServiceAccounts(config.Namespace).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        if len(resp.Secrets) == len(config.Secrets) {
            log.Printf("[DEBUG] Configuration contains %d secrets, saw %d, expected %d", len(config.Secrets), len(resp.Secrets), len(config.Secrets)+1)
-           return resource.RetryableError(fmt.Errorf("Waiting for default secret of %q to appear", buildId(resp.ObjectMeta)))
+           return retry.RetryableError(fmt.Errorf("Waiting for default secret of %q to appear", buildId(resp.ObjectMeta)))
        }

        diff := diffObjectReferences(config.Secrets, resp.Secrets)

@@ -140,7 +140,7 @@ func getServiceAccountDefaultSecretV1(ctx context.Context, name string, config c
            FieldSelector: fmt.Sprintf("type=%s", corev1.SecretTypeServiceAccountToken),
        })
        if err != nil {
-           return resource.NonRetryableError(err)
+           return retry.NonRetryableError(err)
        }

        for _, secret := range secretList.Items {

@@ -153,11 +153,11 @@ func getServiceAccountDefaultSecretV1(ctx context.Context, name string, config c
        }

        if len(svcAccTokens) == 0 {
-           return resource.RetryableError(fmt.Errorf("Expected 1 generated service account token, %d found", len(svcAccTokens)))
+           return retry.RetryableError(fmt.Errorf("Expected 1 generated service account token, %d found", len(svcAccTokens)))
        }

        if len(svcAccTokens) > 1 {
-           return resource.NonRetryableError(fmt.Errorf("Expected 1 generated service account token, %d found: %s", len(svcAccTokens), err))
+           return retry.NonRetryableError(fmt.Errorf("Expected 1 generated service account token, %d found: %s", len(svcAccTokens), err))
        }

        return nil

|||
|
|
@@ -11,10 +11,9 @@ import (

 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
-	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
-	api "k8s.io/api/core/v1"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -202,11 +201,11 @@ func resourceKubernetesServiceSchemaV1() map[string]*schema.Schema {
 			Type:        schema.TypeString,
 			Description: "The IP protocol for this port. Supports `TCP` and `UDP`. Default is `TCP`.",
 			Optional:    true,
-			Default:     string(api.ProtocolTCP),
+			Default:     string(corev1.ProtocolTCP),
 			ValidateFunc: validation.StringInSlice([]string{
-				string(api.ProtocolTCP),
-				string(api.ProtocolUDP),
-				string(api.ProtocolSCTP),
+				string(corev1.ProtocolTCP),
+				string(corev1.ProtocolUDP),
+				string(corev1.ProtocolSCTP),
 			}, false),
 		},
 		"target_port": {
@@ -354,11 +353,11 @@ func resourceKubernetesServiceV1Create(ctx context.Context, d *schema.ResourceDa
 	if out.Spec.Type == corev1.ServiceTypeLoadBalancer && d.Get("wait_for_load_balancer").(bool) {
 		log.Printf("[DEBUG] Waiting for load balancer to assign IP/hostname")

-		err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *resource.RetryError {
+		err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError {
 			svc, err := conn.CoreV1().Services(out.Namespace).Get(ctx, out.Name, metav1.GetOptions{})
 			if err != nil {
 				log.Printf("[DEBUG] Received error: %#v", err)
-				return resource.NonRetryableError(err)
+				return retry.NonRetryableError(err)
 			}

 			lbIngress := svc.Status.LoadBalancer.Ingress
@@ -368,7 +367,7 @@ func resourceKubernetesServiceV1Create(ctx context.Context, d *schema.ResourceDa
 			return nil
 		}

-		return resource.RetryableError(fmt.Errorf(
+		return retry.RetryableError(fmt.Errorf(
 			"Waiting for service %q to assign IP/hostname for a load balancer", d.Id()))
 	})
 	if err != nil {
@@ -491,17 +490,17 @@ func resourceKubernetesServiceV1Delete(ctx context.Context, d *schema.ResourceDa
 		return diag.FromErr(err)
 	}

-	err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
+	err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError {
 		_, err := conn.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			if statusErr, ok := err.(*errors.StatusError); ok && errors.IsNotFound(statusErr) {
 				return nil
 			}
-			return resource.NonRetryableError(err)
+			return retry.NonRetryableError(err)
 		}

 		e := fmt.Errorf("Service (%s) still exists", d.Id())
-		return resource.RetryableError(e)
+		return retry.RetryableError(e)
 	})
 	if err != nil {
 		return diag.FromErr(err)
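This file previously imported k8s.io/api/core/v1 twice, once as api and once as corev1; the commit drops the redundant api alias and rewrites every reference to corev1.*. Go permits two names for the same package, but linters can flag the duplicate import, and one name per import path keeps the file consistent. An illustrative before/after:

import (
	corev1 "k8s.io/api/core/v1"
	// api "k8s.io/api/core/v1"  // redundant second alias for the same package, now removed
)

var defaultProtocol = string(corev1.ProtocolTCP) // was string(api.ProtocolTCP)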
@@ -11,7 +11,7 @@ import (

 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
-	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	appsv1 "k8s.io/api/apps/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
@@ -105,7 +105,7 @@ func resourceKubernetesStatefulSetV1Create(ctx context.Context, d *schema.Resour
 		log.Printf("[INFO] Waiting for StatefulSet %s to rollout", id)
 		namespace := out.ObjectMeta.Namespace
 		name := out.ObjectMeta.Name
-		err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutCreate),
+		err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate),
 			retryUntilStatefulSetRolloutComplete(ctx, conn, namespace, name))
 		if err != nil {
 			return diag.FromErr(err)
@@ -218,7 +218,7 @@ func resourceKubernetesStatefulSetV1Update(ctx context.Context, d *schema.Resour

 	if d.Get("wait_for_rollout").(bool) {
 		log.Printf("[INFO] Waiting for StatefulSet %s to rollout", d.Id())
-		err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate),
+		err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate),
 			retryUntilStatefulSetRolloutComplete(ctx, conn, namespace, name))
 		if err != nil {
 			return diag.FromErr(err)
@@ -247,20 +247,20 @@ func resourceKubernetesStatefulSetV1Delete(ctx context.Context, d *schema.Resour
 		}
 		return diag.FromErr(err)
 	}
-	err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
+	err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError {
 		out, err := conn.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
 			switch {
 			case errors.IsNotFound(err):
 				return nil
 			default:
-				return resource.NonRetryableError(err)
+				return retry.NonRetryableError(err)
 			}
 		}

 		log.Printf("[DEBUG] Current state of StatefulSet: %#v", out.Status.Conditions)
 		e := fmt.Errorf("StatefulSet %s still exists %#v", name, out.Status.Conditions)
-		return resource.RetryableError(e)
+		return retry.RetryableError(e)
 	})
 	if err != nil {
 		return diag.FromErr(err)
@@ -272,15 +272,15 @@ func resourceKubernetesStatefulSetV1Delete(ctx context.Context, d *schema.Resour
 }

 // retryUntilStatefulSetRolloutComplete checks if a given job finished its execution and is either in 'Complete' or 'Failed' state.
-func retryUntilStatefulSetRolloutComplete(ctx context.Context, conn *kubernetes.Clientset, ns, name string) resource.RetryFunc {
-	return func() *resource.RetryError {
+func retryUntilStatefulSetRolloutComplete(ctx context.Context, conn *kubernetes.Clientset, ns, name string) retry.RetryFunc {
+	return func() *retry.RetryError {
 		res, err := conn.AppsV1().StatefulSets(ns).Get(ctx, name, metav1.GetOptions{})
 		if err != nil {
-			return resource.NonRetryableError(err)
+			return retry.NonRetryableError(err)
 		}

 		if res.Status.ReadyReplicas != *res.Spec.Replicas {
-			return resource.RetryableError(fmt.Errorf("StatefulSet %s/%s is not finished rolling out", ns, name))
+			return retry.RetryableError(fmt.Errorf("StatefulSet %s/%s is not finished rolling out", ns, name))
 		}

 		// NOTE: This is what kubectl uses to determine if a rollout is done.
@@ -290,12 +290,12 @@ func retryUntilStatefulSetRolloutComplete(ctx context.Context, conn *kubernetes.
 		gk := gvk.GroupKind()
 		statusViewer, err := polymorphichelpers.StatusViewerFor(gk)
 		if err != nil {
-			return resource.NonRetryableError(err)
+			return retry.NonRetryableError(err)
 		}

 		obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(res)
 		if err != nil {
-			return resource.NonRetryableError(err)
+			return retry.NonRetryableError(err)
 		}

 		// NOTE: For some reason, the Kind and apiVersion get lost when converting to unstructured.
@@ -307,13 +307,13 @@ func retryUntilStatefulSetRolloutComplete(ctx context.Context, conn *kubernetes.
 		// for StatefulSet so it is set to 0 here
 		_, done, err := statusViewer.Status(&u, 0)
 		if err != nil {
-			return resource.NonRetryableError(err)
+			return retry.NonRetryableError(err)
 		}

 		if done {
 			return nil
 		}

-		return resource.RetryableError(fmt.Errorf("StatefulSet %s/%s is not finished rolling out", ns, name))
+		return retry.RetryableError(fmt.Errorf("StatefulSet %s/%s is not finished rolling out", ns, name))
 	}
 }
@@ -10,7 +10,7 @@ import (

 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
-	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
 	v1 "k8s.io/api/core/v1"
@@ -244,17 +244,17 @@ func resourceKubernetesStorageClassV1Delete(ctx context.Context, d *schema.Resou
 		return diag.FromErr(err)
 	}

-	err = resource.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *resource.RetryError {
+	err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutDelete), func() *retry.RetryError {
 		_, err := conn.StorageV1().StorageClasses().Get(ctx, d.Id(), metav1.GetOptions{})
 		if err != nil {
 			if statusErr, ok := err.(*errors.StatusError); ok && errors.IsNotFound(statusErr) {
 				return nil
 			}
-			return resource.NonRetryableError(err)
+			return retry.NonRetryableError(err)
 		}

 		e := fmt.Errorf("storage class (%s) still exists", d.Id())
-		return resource.RetryableError(e)
+		return retry.RetryableError(e)
 	})
 	if err != nil {
 		return diag.FromErr(err)
@@ -4,11 +4,8 @@
 package kubernetes

 import (
-	"fmt"
-
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
-	"github.com/robfig/cron"
 )

 func cronJobSpecFieldsV1() map[string]*schema.Schema {
@@ -49,7 +46,7 @@ func cronJobSpecFieldsV1() map[string]*schema.Schema {
 		"schedule": {
 			Type:         schema.TypeString,
 			Required:     true,
-			ValidateFunc: validateCronExpression(),
+			ValidateFunc: validateCronExpression,
 			Description:  "Cron format string, e.g. 0 * * * * or @hourly, as schedule time of its jobs to be created and executed.",
 		},
 		"starting_deadline_seconds": {
@@ -77,18 +74,3 @@ func cronJobSpecFieldsV1() map[string]*schema.Schema {
 		},
 	}
 }
-
-func validateCronExpression() schema.SchemaValidateFunc {
-	return func(i interface{}, k string) (s []string, es []error) {
-		v, ok := i.(string)
-		if !ok {
-			es = append(es, fmt.Errorf("expected type of '%s' to be string", k))
-			return
-		}
-		_, err := cron.ParseStandard(v)
-		if err != nil {
-			es = append(es, fmt.Errorf("'%s' should be an valid Cron expression", k))
-		}
-		return
-	}
-}
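The schedule field now references validateCronExpression directly instead of calling a factory. Since the validator takes no configuration, a plain function with the schema.SchemaValidateFunc signature is enough; the replacement implementation (still built on cron.ParseStandard, like the removed one) appears later in this diff among the shared validators. An illustrative wiring, with the surrounding schema map elided:

"schedule": {
	Type:         schema.TypeString,
	Required:     true,
	ValidateFunc: validateCronExpression, // was: validateCronExpression()
},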
@@ -46,7 +46,7 @@ func cronJobSpecFieldsV1Beta1() map[string]*schema.Schema {
 		"schedule": {
 			Type:         schema.TypeString,
 			Required:     true,
-			ValidateFunc: validateCronExpression(),
+			ValidateFunc: validateCronExpression,
 			Description:  "Cron format string, e.g. 0 * * * * or @hourly, as schedule time of its jobs to be created and executed.",
 		},
 		"starting_deadline_seconds": {
@@ -4,7 +4,7 @@
 package kubernetes

 import (
-	api "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"

 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
@@ -76,13 +76,13 @@ func podSpecFields(isUpdatable, isComputed bool) map[string]*schema.Schema {
 			Optional:     true,
 			Computed:     isComputed,
 			ForceNew:     !isUpdatable,
-			Default:      conditionalDefault(!isComputed, string(api.DNSClusterFirst)),
+			Default:      conditionalDefault(!isComputed, string(corev1.DNSClusterFirst)),
 			Description:  "Set DNS policy for containers within the pod. Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. Defaults to 'ClusterFirst'. More info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy",
 			ValidateFunc: validation.StringInSlice([]string{
-				string(api.DNSClusterFirst),
-				string(api.DNSClusterFirstWithHostNet),
-				string(api.DNSDefault),
-				string(api.DNSNone),
+				string(corev1.DNSClusterFirst),
+				string(corev1.DNSClusterFirstWithHostNet),
+				string(corev1.DNSDefault),
+				string(corev1.DNSNone),
 			}, false),
 		},
 		"dns_config": {
@@ -211,7 +211,7 @@ func podSpecFields(isUpdatable, isComputed bool) map[string]*schema.Schema {
 					"name": {
 						Type:         schema.TypeString,
 						Required:     true,
-						ValidateFunc: validation.StringInSlice([]string{string(api.Linux), string(api.Windows)}, false),
+						ValidateFunc: validation.StringInSlice([]string{string(corev1.Linux), string(corev1.Windows)}, false),
 						Description:  "Name is the name of the operating system. The currently supported values are linux and windows.",
 					},
 				},
@@ -266,12 +266,12 @@ func podSpecFields(isUpdatable, isComputed bool) map[string]*schema.Schema {
 			Optional:     true,
 			Computed:     isComputed,
 			ForceNew:     !isUpdatable,
-			Default:      conditionalDefault(!isComputed, string(api.RestartPolicyAlways)),
+			Default:      conditionalDefault(!isComputed, string(corev1.RestartPolicyAlways)),
 			Description:  "Restart policy for all containers within the pod. One of Always, OnFailure, Never. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy.",
 			ValidateFunc: validation.StringInSlice([]string{
-				string(api.RestartPolicyAlways),
-				string(api.RestartPolicyOnFailure),
-				string(api.RestartPolicyNever),
+				string(corev1.RestartPolicyAlways),
+				string(corev1.RestartPolicyOnFailure),
+				string(corev1.RestartPolicyNever),
 			}, false),
 		},
 		"security_context": {
@@ -332,8 +332,8 @@ func podSpecFields(isUpdatable, isComputed bool) map[string]*schema.Schema {
 						Description: "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir.",
 						Optional:    true,
 						ValidateFunc: validation.StringInSlice([]string{
-							string(api.FSGroupChangeAlways),
-							string(api.FSGroupChangeOnRootMismatch),
+							string(corev1.FSGroupChangeAlways),
+							string(corev1.FSGroupChangeOnRootMismatch),
 						}, false),
 						ForceNew: !isUpdatable,
 					},
@@ -450,9 +450,9 @@ func podSpecFields(isUpdatable, isComputed bool) map[string]*schema.Schema {
 						Optional: true,
 						ForceNew: !isUpdatable,
 						ValidateFunc: validation.StringInSlice([]string{
-							string(api.TaintEffectNoSchedule),
-							string(api.TaintEffectPreferNoSchedule),
-							string(api.TaintEffectNoExecute),
+							string(corev1.TaintEffectNoSchedule),
+							string(corev1.TaintEffectPreferNoSchedule),
+							string(corev1.TaintEffectNoExecute),
 						}, false),
 					},
 					"key": {
@@ -464,12 +464,12 @@ func podSpecFields(isUpdatable, isComputed bool) map[string]*schema.Schema {
 					"operator": {
 						Type:        schema.TypeString,
 						Description: "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.",
-						Default:     string(api.TolerationOpEqual),
+						Default:     string(corev1.TolerationOpEqual),
 						Optional:    true,
 						ForceNew:    !isUpdatable,
 						ValidateFunc: validation.StringInSlice([]string{
-							string(api.TolerationOpExists),
-							string(api.TolerationOpEqual),
+							string(corev1.TolerationOpExists),
+							string(corev1.TolerationOpEqual),
 						}, false),
 					},
 					"toleration_seconds": {
@@ -510,11 +510,11 @@ func podSpecFields(isUpdatable, isComputed bool) map[string]*schema.Schema {
 					"when_unsatisfiable": {
 						Type:        schema.TypeString,
 						Description: "indicates how to deal with a pod if it doesn't satisfy the spread constraint.",
-						Default:     string(api.DoNotSchedule),
+						Default:     string(corev1.DoNotSchedule),
 						Optional:    true,
 						ValidateFunc: validation.StringInSlice([]string{
-							string(api.DoNotSchedule),
-							string(api.ScheduleAnyway),
+							string(corev1.DoNotSchedule),
+							string(corev1.ScheduleAnyway),
 						}, false),
 					},
 					"label_selector": {
@@ -568,7 +568,7 @@ func volumeSchema(isUpdatable bool) *schema.Resource {
 					"path": {
 						Type:         schema.TypeString,
 						Optional:     true,
-						ValidateFunc: validateAttributeValueDoesNotContain(".."),
+						ValidateFunc: validatePath,
 						Description:  `The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.`,
 					},
 				},
@@ -606,7 +606,7 @@ func volumeSchema(isUpdatable bool) *schema.Resource {
 				Type:         schema.TypeString,
 				Description:  "Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.",
 				Optional:     true,
-				ValidateFunc: validateAttributeValueDoesNotContain(".."),
+				ValidateFunc: validatePath,
 			},
 			"repository": {
 				Type: schema.TypeString,
@@ -671,7 +671,7 @@ func volumeSchema(isUpdatable bool) *schema.Resource {
 					"path": {
 						Type:         schema.TypeString,
 						Required:     true,
-						ValidateFunc: validateAttributeValueDoesNotContain(".."),
+						ValidateFunc: validatePath,
 						Description:  `Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'`,
 					},
 					"resource_field_ref": {
@@ -715,12 +715,15 @@ func volumeSchema(isUpdatable bool) *schema.Resource {
 		Elem: &schema.Resource{
 			Schema: map[string]*schema.Schema{
 				"medium": {
-					Type:         schema.TypeString,
-					Description:  `What type of storage medium should back this directory. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir`,
-					Optional:     true,
-					Default:      "",
-					ForceNew:     !isUpdatable,
-					ValidateFunc: validateAttributeValueIsIn([]string{"", "Memory"}),
+					Type:        schema.TypeString,
+					Description: `What type of storage medium should back this directory. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir`,
+					Optional:    true,
+					Default:     "",
+					ForceNew:    !isUpdatable,
+					ValidateFunc: validation.StringInSlice([]string{
+						string(corev1.StorageMediumDefault),
+						string(corev1.StorageMediumMemory),
+					}, false),
 				},
 				"size_limit": {
 					Type: schema.TypeString,
@@ -844,7 +847,7 @@ func volumeSchema(isUpdatable bool) *schema.Resource {
 					"path": {
 						Type:         schema.TypeString,
 						Optional:     true,
-						ValidateFunc: validateAttributeValueDoesNotContain(".."),
+						ValidateFunc: validatePath,
 						Description:  "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.",
 					},
 				},
@@ -921,7 +924,7 @@ func volumeSchema(isUpdatable bool) *schema.Resource {
 					"path": {
 						Type:         schema.TypeString,
 						Optional:     true,
-						ValidateFunc: validateAttributeValueDoesNotContain(".."),
+						ValidateFunc: validatePath,
 						Description:  "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.",
 					},
 				},
@@ -967,7 +970,7 @@ func volumeSchema(isUpdatable bool) *schema.Resource {
 					"path": {
 						Type:         schema.TypeString,
 						Optional:     true,
-						ValidateFunc: validateAttributeValueDoesNotContain(".."),
+						ValidateFunc: validatePath,
 						Description:  "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.",
 					},
 				},
@@ -1025,7 +1028,7 @@ func volumeSchema(isUpdatable bool) *schema.Resource {
 					"path": {
 						Type:         schema.TypeString,
 						Required:     true,
-						ValidateFunc: validateAttributeValueDoesNotContain(".."),
+						ValidateFunc: validatePath,
 						Description:  "Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'",
 					},
 					"resource_field_ref": {
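Two substitutions recur throughout this schema file: every hand-rolled validateAttributeValueDoesNotContain("..") gives way to the new validatePath (defined later in this diff), and the emptydir medium check moves from validateAttributeValueIsIn to the SDK's validation.StringInSlice over the upstream corev1.StorageMedium* constants. A small sketch of how validation.StringInSlice behaves; the error wording is approximate:

f := validation.StringInSlice([]string{
	string(corev1.StorageMediumDefault), // ""
	string(corev1.StorageMediumMemory),  // "Memory"
}, false) // second argument false = case-sensitive match

_, errs := f("Disk", "medium")   // errs: expected medium to be one of the listed values (approximate wording)
_, errs = f("Memory", "medium")  // no errors
_ = errs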
@@ -7,7 +7,6 @@ import (
 	"errors"

 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-	api "k8s.io/api/core/v1"
 	corev1 "k8s.io/api/core/v1"
 )

@@ -89,7 +88,7 @@ func expandPersistentVolumeClaimSpec(l []interface{}) (*corev1.PersistentVolumeC
 		obj.StorageClassName = ptrToString(v)
 	}
 	if v, ok := in["volume_mode"].(string); ok && v != "" {
-		obj.VolumeMode = pointerOf(api.PersistentVolumeMode(v))
+		obj.VolumeMode = pointerOf(corev1.PersistentVolumeMode(v))
 	}
 	return obj, nil
 }
@@ -20,7 +20,7 @@ func flattenAWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSo
 	if in.Partition != 0 {
 		att["partition"] = in.Partition
 	}
-	if in.ReadOnly != false {
+	if in.ReadOnly {
 		att["read_only"] = in.ReadOnly
 	}
 	return []interface{}{att}
@@ -47,7 +47,7 @@ func flattenAzureFileVolumeSource(in *v1.AzureFileVolumeSource) []interface{} {
 	att := make(map[string]interface{})
 	att["secret_name"] = in.SecretName
 	att["share_name"] = in.ShareName
-	if in.ReadOnly != false {
+	if in.ReadOnly {
 		att["read_only"] = in.ReadOnly
 	}
 	return []interface{}{att}
@@ -57,7 +57,7 @@ func flattenAzureFilePersistentVolumeSource(in *v1.AzureFilePersistentVolumeSour
 	att := make(map[string]interface{})
 	att["secret_name"] = in.SecretName
 	att["share_name"] = in.ShareName
-	if in.ReadOnly != false {
+	if in.ReadOnly {
 		att["read_only"] = in.ReadOnly
 	}
 	if in.SecretNamespace != nil {
@@ -81,7 +81,7 @@ func flattenCephFSVolumeSource(in *v1.CephFSVolumeSource) []interface{} {
 	if in.SecretRef != nil {
 		att["secret_ref"] = flattenLocalObjectReference(in.SecretRef)
 	}
-	if in.ReadOnly != false {
+	if in.ReadOnly {
 		att["read_only"] = in.ReadOnly
 	}
 	return []interface{}{att}
@@ -102,7 +102,7 @@ func flattenCephFSPersistentVolumeSource(in *v1.CephFSPersistentVolumeSource) []
 	if in.SecretRef != nil {
 		att["secret_ref"] = flattenSecretReference(in.SecretRef)
 	}
-	if in.ReadOnly != false {
+	if in.ReadOnly {
 		att["read_only"] = in.ReadOnly
 	}
 	return []interface{}{att}
@@ -114,7 +114,7 @@ func flattenCinderPersistentVolumeSource(in *v1.CinderPersistentVolumeSource) []
 	if in.FSType != "" {
 		att["fs_type"] = in.FSType
 	}
-	if in.ReadOnly != false {
+	if in.ReadOnly {
 		att["read_only"] = in.ReadOnly
 	}
 	return []interface{}{att}
@@ -126,7 +126,7 @@ func flattenCinderVolumeSource(in *v1.CinderVolumeSource) []interface{} {
 	if in.FSType != "" {
 		att["fs_type"] = in.FSType
 	}
-	if in.ReadOnly != false {
+	if in.ReadOnly {
 		att["read_only"] = in.ReadOnly
 	}
 	return []interface{}{att}
@@ -139,7 +139,7 @@ func flattenFCVolumeSource(in *v1.FCVolumeSource) []interface{} {
 	if in.FSType != "" {
 		att["fs_type"] = in.FSType
 	}
-	if in.ReadOnly != false {
+	if in.ReadOnly {
 		att["read_only"] = in.ReadOnly
 	}
 	return []interface{}{att}
@@ -154,7 +154,7 @@ func flattenFlexPersistentVolumeSource(in *v1.FlexPersistentVolumeSource) []inte
 	if in.SecretRef != nil {
 		att["secret_ref"] = flattenSecretReference(in.SecretRef)
 	}
-	if in.ReadOnly != false {
+	if in.ReadOnly {
 		att["read_only"] = in.ReadOnly
 	}
 	if len(in.Options) > 0 {
@@ -172,7 +172,7 @@ func flattenFlexVolumeSource(in *v1.FlexVolumeSource) []interface{} {
 	if in.SecretRef != nil {
 		att["secret_ref"] = flattenLocalObjectReference(in.SecretRef)
 	}
-	if in.ReadOnly != false {
+	if in.ReadOnly {
 		att["read_only"] = in.ReadOnly
 	}
 	if len(in.Options) > 0 {
@@ -197,7 +197,7 @@ func flattenGCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource)
 	if in.Partition != 0 {
 		att["partition"] = in.Partition
 	}
-	if in.ReadOnly != false {
+	if in.ReadOnly {
 		att["read_only"] = in.ReadOnly
 	}
 	return []interface{}{att}
@@ -207,7 +207,7 @@ func flattenGlusterfsPersistentVolumeSource(in *v1.GlusterfsPersistentVolumeSour
 	att := make(map[string]interface{})
 	att["endpoints_name"] = in.EndpointsName
 	att["path"] = in.Path
-	if in.ReadOnly != false {
+	if in.ReadOnly {
 		att["read_only"] = in.ReadOnly
 	}
 	return []interface{}{att}
@@ -217,7 +217,7 @@ func flattenGlusterfsVolumeSource(in *v1.GlusterfsVolumeSource) []interface{} {
 	att := make(map[string]interface{})
 	att["endpoints_name"] = in.EndpointsName
 	att["path"] = in.Path
-	if in.ReadOnly != false {
+	if in.ReadOnly {
 		att["read_only"] = in.ReadOnly
 	}
 	return []interface{}{att}
@@ -255,7 +255,7 @@ func flattenISCSIVolumeSource(in *v1.ISCSIVolumeSource) []interface{} {
 	if in.FSType != "" {
 		att["fs_type"] = in.FSType
 	}
-	if in.ReadOnly != false {
+	if in.ReadOnly {
 		att["read_only"] = in.ReadOnly
 	}
 	return []interface{}{att}
@@ -278,7 +278,7 @@ func flattenISCSIPersistentVolumeSource(in *v1.ISCSIPersistentVolumeSource) []in
 	if in.FSType != "" {
 		att["fs_type"] = in.FSType
 	}
-	if in.ReadOnly != false {
+	if in.ReadOnly {
 		att["read_only"] = in.ReadOnly
 	}
 	return []interface{}{att}
@@ -307,7 +307,7 @@ func flattenNFSVolumeSource(in *v1.NFSVolumeSource) []interface{} {
 	att := make(map[string]interface{})
 	att["server"] = in.Server
 	att["path"] = in.Path
-	if in.ReadOnly != false {
+	if in.ReadOnly {
 		att["read_only"] = in.ReadOnly
 	}
 	return []interface{}{att}
@@ -474,7 +474,7 @@ func flattenQuobyteVolumeSource(in *v1.QuobyteVolumeSource) []interface{} {
 	att := make(map[string]interface{})
 	att["registry"] = in.Registry
 	att["volume"] = in.Volume
-	if in.ReadOnly != false {
+	if in.ReadOnly {
 		att["read_only"] = in.ReadOnly
 	}
 	if in.User != "" {
@@ -505,7 +505,7 @@ func flattenRBDVolumeSource(in *v1.RBDVolumeSource) []interface{} {
 	if in.SecretRef != nil {
 		att["secret_ref"] = flattenLocalObjectReference(in.SecretRef)
 	}
-	if in.ReadOnly != false {
+	if in.ReadOnly {
 		att["read_only"] = in.ReadOnly
 	}
 	return []interface{}{att}
@@ -530,7 +530,7 @@ func flattenRBDPersistentVolumeSource(in *v1.RBDPersistentVolumeSource) []interf
 	if in.SecretRef != nil {
 		att["secret_ref"] = flattenSecretReference(in.SecretRef)
 	}
-	if in.ReadOnly != false {
+	if in.ReadOnly {
 		att["read_only"] = in.ReadOnly
 	}
 	return []interface{}{att}
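Every one of these flattener hunks is the same gosimple fix (S1002): comparing a boolean against false is redundant. A minimal before/after:

if in.ReadOnly != false { // flagged by gosimple S1002
	att["read_only"] = in.ReadOnly
}

if in.ReadOnly { // equivalent, idiomatic form
	att["read_only"] = in.ReadOnly
}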
@@ -4,7 +4,6 @@
 package kubernetes

 import (
-	"fmt"
 	"reflect"
 	"testing"

@@ -31,7 +30,7 @@ func TestIsInternalKey(t *testing.T) {
 		{"pv.kubernetes.io/any/path", true},
 	}
 	for _, tc := range testCases {
-		t.Run(fmt.Sprintf("%s", tc.Key), func(t *testing.T) {
+		t.Run(tc.Key, func(t *testing.T) {
 			isInternal := isInternalKey(tc.Key)
 			if tc.Expected && isInternal != tc.Expected {
 				t.Fatalf("Expected %q to be internal", tc.Key)
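Formatting a value that is already a string through fmt.Sprintf("%s", ...) is a pointless round trip (gosimple S1025); using the string directly also lets the file drop its only fmt dependency. In isolation:

name := "pv.kubernetes.io/any/path"
a := fmt.Sprintf("%s", name) // S1025: needless trip through the formatter
b := name                    // identical value, no fmt import needed
_ = a == b                   // always true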
@@ -5,11 +5,14 @@ package kubernetes

 import (
 	"encoding/base64"
+	"errors"
 	"fmt"
+	"path"
+	"path/filepath"
 	"strconv"
 	"strings"

 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/robfig/cron"
 	"k8s.io/apimachinery/pkg/api/resource"
 	apiValidation "k8s.io/apimachinery/pkg/api/validation"
 	utilValidation "k8s.io/apimachinery/pkg/util/validation"
@@ -121,9 +124,9 @@ func validatePortName(value interface{}, key string) (ws []string, es []error) {
 	return
 }
 func validatePortNumOrName(value interface{}, key string) (ws []string, es []error) {
-	switch value.(type) {
+	switch t := value.(type) {
 	case string:
-		intVal, err := strconv.Atoi(value.(string))
+		intVal, err := strconv.Atoi(t)
 		if err != nil {
 			return validatePortName(value, key)
 		}
@@ -236,36 +239,30 @@ func validateModeBits(value interface{}, key string) (ws []string, es []error) {
 	return
 }

-func validateAttributeValueDoesNotContain(searchString string) schema.SchemaValidateFunc {
-	return func(v interface{}, k string) (ws []string, errors []error) {
-		input := v.(string)
-		if strings.Contains(input, searchString) {
-			errors = append(errors, fmt.Errorf(
-				"%q must not contain %q",
-				k, searchString))
-		}
-		return
-	}
-}
-
-func validateAttributeValueIsIn(validValues []string) schema.SchemaValidateFunc {
-	return func(v interface{}, k string) (ws []string, errors []error) {
-		input := v.(string)
-		isValid := false
-		for _, s := range validValues {
-			if s == input {
-				isValid = true
-				break
-			}
-		}
-		if !isValid {
-			errors = append(errors, fmt.Errorf(
-				"%q must contain a value from %#v, got %q",
-				k, validValues, input))
-		}
-		return
+// validatePath makes sure path:
+// - is not abs path
+// - does not contain any '..' elements
+// - does not start with '..'
+func validatePath(v interface{}, k string) ([]string, []error) {
+	// inherit logic from the Kubernetes API validation: https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/core/validation/validation.go
+	targetPath := v.(string)
+
+	if path.IsAbs(targetPath) {
+		return []string{}, []error{errors.New("must be a relative path")}
+	}
+
+	parts := strings.Split(filepath.ToSlash(targetPath), "/")
+	for _, part := range parts {
+		if part == ".." {
+			return []string{}, []error{fmt.Errorf("%q must not contain %q", k, "..")}
+		}
+	}
+
+	if strings.HasPrefix(targetPath, "..") {
+		return []string{}, []error{fmt.Errorf("%q must not start with %q", k, "..")}
+	}
+
+	return []string{}, []error{}
 }

 func validateTypeStringNullableIntOrPercent(v interface{}, key string) (ws []string, es []error) {
@@ -293,3 +290,14 @@ func validateTypeStringNullableIntOrPercent(v interface{}, key string) (ws []str

 	return
 }
+
+func validateCronExpression(v interface{}, k string) ([]string, []error) {
+	errors := make([]error, 0)
+
+	_, err := cron.ParseStandard(v.(string))
+	if err != nil {
+		errors = append(errors, fmt.Errorf("%q should be an valid Cron expression", k))
+	}
+
+	return []string{}, errors
+}
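The new validatePath is a plain schema.SchemaValidateFunc, so the SDK calls it with the raw attribute value and its key. A quick illustration of what it rejects; the inputs here are made up:

// Relative, clean path: no errors.
_, errs := validatePath("config/app.yaml", "path")

// Absolute path: a single "must be a relative path" error.
_, errs = validatePath("/etc/passwd", "path")

// Any ".." element fails first, so this returns `"path" must not contain ".."`
// before the prefix check is ever reached.
_, errs = validatePath("../escape", "path")
_ = errs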
@@ -5,7 +5,7 @@ package openapi

 import (
 	"fmt"
-	"io/ioutil"
+	"os"
 	"path/filepath"
 	"strings"
 	"testing"
@@ -166,7 +166,7 @@ func TestGetType(t *testing.T) {
 func buildFixtureFoundry() (Foundry, error) {
 	sfile := filepath.Join("testdata", "k8s-swagger.json")

-	input, err := ioutil.ReadFile(sfile)
+	input, err := os.ReadFile(sfile)
 	if err != nil {
 		return nil, fmt.Errorf("failed to load definition file: %s : %s", sfile, err)
 	}
@@ -29,13 +29,13 @@ func ToTFValue(in interface{}, st tftypes.Type, th map[string]string, at *tftype
 	if in == nil {
 		return tftypes.NewValue(st, nil), nil
 	}
-	switch in.(type) {
+	switch t := in.(type) {
 	case string:
 		switch {
 		case st.Is(tftypes.String) || st.Is(tftypes.DynamicPseudoType):
-			return tftypes.NewValue(tftypes.String, in.(string)), nil
+			return tftypes.NewValue(tftypes.String, t), nil
 		case st.Is(tftypes.Number):
-			num, err := strconv.Atoi(in.(string))
+			num, err := strconv.Atoi(t)
 			if err != nil {
 				return tftypes.Value{}, err
 			}
@@ -53,11 +53,11 @@ func ToTFValue(in interface{}, st tftypes.Type, th map[string]string, at *tftype
 	case int:
 		switch {
 		case st.Is(tftypes.Number) || st.Is(tftypes.DynamicPseudoType):
-			return tftypes.NewValue(tftypes.Number, new(big.Float).SetInt64(int64(in.(int)))), nil
+			return tftypes.NewValue(tftypes.Number, new(big.Float).SetInt64(int64(t))), nil
 		case st.Is(tftypes.String):
 			ht, ok := th[morph.ValueToTypePath(at).String()]
 			if ok && ht == "io.k8s.apimachinery.pkg.util.intstr.IntOrString" { // We store this in state as "string"
-				return tftypes.NewValue(tftypes.String, strconv.FormatInt(int64(in.(int)), 10)), nil
+				return tftypes.NewValue(tftypes.String, strconv.FormatInt(int64(t), 10)), nil
 			}
 			return tftypes.Value{}, at.NewErrorf(`[%s] cannot convert payload from "int" to "tftypes.String"`, at.String())
 		default:
@@ -66,11 +66,11 @@ func ToTFValue(in interface{}, st tftypes.Type, th map[string]string, at *tftype
 	case int64:
 		switch {
 		case st.Is(tftypes.Number) || st.Is(tftypes.DynamicPseudoType):
-			return tftypes.NewValue(tftypes.Number, new(big.Float).SetInt64(in.(int64))), nil
+			return tftypes.NewValue(tftypes.Number, new(big.Float).SetInt64(t)), nil
 		case st.Is(tftypes.String):
 			ht, ok := th[morph.ValueToTypePath(at).String()]
 			if ok && ht == "io.k8s.apimachinery.pkg.util.intstr.IntOrString" { // We store this in state as "string"
-				return tftypes.NewValue(tftypes.String, strconv.FormatInt(in.(int64), 10)), nil
+				return tftypes.NewValue(tftypes.String, strconv.FormatInt(t, 10)), nil
 			}
 			return tftypes.Value{}, at.NewErrorf(`[%s] cannot convert payload from "int64" to "tftypes.String"`, at.String())
 		default:
@@ -79,11 +79,11 @@ func ToTFValue(in interface{}, st tftypes.Type, th map[string]string, at *tftype
 	case int32:
 		switch {
 		case st.Is(tftypes.Number) || st.Is(tftypes.DynamicPseudoType):
-			return tftypes.NewValue(tftypes.Number, new(big.Float).SetInt64(int64(in.(int32)))), nil
+			return tftypes.NewValue(tftypes.Number, new(big.Float).SetInt64(int64(t))), nil
 		case st.Is(tftypes.String):
 			ht, ok := th[morph.ValueToTypePath(at).String()]
 			if ok && ht == "io.k8s.apimachinery.pkg.util.intstr.IntOrString" { // We store this in state as "string"
-				return tftypes.NewValue(tftypes.String, strconv.FormatInt(int64(in.(int32)), 10)), nil
+				return tftypes.NewValue(tftypes.String, strconv.FormatInt(int64(t), 10)), nil
 			}
 			return tftypes.Value{}, at.NewErrorf(`[%s] cannot convert payload from "int32" to "tftypes.String"`, at.String())
 		default:
@@ -92,11 +92,11 @@ func ToTFValue(in interface{}, st tftypes.Type, th map[string]string, at *tftype
 	case int16:
 		switch {
 		case st.Is(tftypes.Number) || st.Is(tftypes.DynamicPseudoType):
-			return tftypes.NewValue(tftypes.Number, new(big.Float).SetInt64(int64(in.(int16)))), nil
+			return tftypes.NewValue(tftypes.Number, new(big.Float).SetInt64(int64(t))), nil
 		case st.Is(tftypes.String):
 			ht, ok := th[morph.ValueToTypePath(at).String()]
 			if ok && ht == "io.k8s.apimachinery.pkg.util.intstr.IntOrString" { // We store this in state as "string"
-				return tftypes.NewValue(tftypes.String, strconv.FormatInt(int64(in.(int16)), 10)), nil
+				return tftypes.NewValue(tftypes.String, strconv.FormatInt(int64(t), 10)), nil
 			}
 			return tftypes.Value{}, at.NewErrorf(`[%s] cannot convert payload from "int16" to "tftypes.String"`, at.String())
 		default:
@@ -105,31 +105,31 @@ func ToTFValue(in interface{}, st tftypes.Type, th map[string]string, at *tftype
 	case float64:
 		switch {
 		case st.Is(tftypes.Number) || st.Is(tftypes.DynamicPseudoType):
-			return tftypes.NewValue(tftypes.Number, new(big.Float).SetFloat64(in.(float64))), nil
+			return tftypes.NewValue(tftypes.Number, new(big.Float).SetFloat64(t)), nil
 		default:
 			return tftypes.Value{}, at.NewErrorf(`[%s] cannot convert payload from "float64" to "%s"`, at.String(), st.String())
 		}
 	case []interface{}:
 		switch {
 		case st.Is(tftypes.List{}):
-			return sliceToTFListValue(in.([]interface{}), st, th, at)
+			return sliceToTFListValue(t, st, th, at)
 		case st.Is(tftypes.Tuple{}):
-			return sliceToTFTupleValue(in.([]interface{}), st, th, at)
+			return sliceToTFTupleValue(t, st, th, at)
 		case st.Is(tftypes.Set{}):
-			return sliceToTFSetValue(in.([]interface{}), st, th, at)
+			return sliceToTFSetValue(t, st, th, at)
 		case st.Is(tftypes.DynamicPseudoType):
-			return sliceToTFDynamicValue(in.([]interface{}), st, th, at)
+			return sliceToTFDynamicValue(t, st, th, at)
 		default:
 			return tftypes.Value{}, at.NewErrorf(`[%s] cannot convert payload from "[]interface{}" to "%s"`, at.String(), st.String())
 		}
 	case map[string]interface{}:
 		switch {
 		case st.Is(tftypes.Object{}):
-			return mapToTFObjectValue(in.(map[string]interface{}), st, th, at)
+			return mapToTFObjectValue(t, st, th, at)
 		case st.Is(tftypes.Map{}):
-			return mapToTFMapValue(in.(map[string]interface{}), st, th, at)
+			return mapToTFMapValue(t, st, th, at)
 		case st.Is(tftypes.DynamicPseudoType):
-			return mapToTFDynamicValue(in.(map[string]interface{}), st, th, at)
+			return mapToTFDynamicValue(t, st, th, at)
 		default:
 			return tftypes.Value{}, at.NewErrorf(`[%s] cannot convert payload from "map[string]interface{}" to "%s"`, at.String(), st.String())
 		}
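These hunks all apply gosimple S1034: binding the value in the type switch (switch t := in.(type)) gives each case a correctly typed t, removing the repeated in.(string) / in.(int64) re-assertions. A self-contained sketch of the pattern:

package main

import "fmt"

// describe shows the bound type-switch form: inside each case, t already has
// that case's concrete type, so no further assertion is needed.
func describe(in interface{}) string {
	switch t := in.(type) {
	case string:
		return "string: " + t
	case int:
		return fmt.Sprintf("int: %d", t)
	case []interface{}:
		return fmt.Sprintf("slice of %d elements", len(t))
	default:
		return fmt.Sprintf("unhandled type %T", t)
	}
}

func main() {
	fmt.Println(describe("x"), describe(7), describe([]interface{}{1, 2}))
}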
@@ -128,7 +128,7 @@ func (ps *RawProviderServer) getOAPIv2Foundry() (openapi.Foundry, error) {
 func loggingTransport(rt http.RoundTripper) http.RoundTripper {
 	return &loggingRountTripper{
 		ot: rt,
-		lt: logging.NewTransport("Kubernetes API", rt),
+		lt: logging.NewSubsystemLoggingHTTPTransport("Kubernetes API", rt),
 	}
 }

@@ -205,7 +205,7 @@ func (s *RawProviderServer) ConfigureProvider(ctx context.Context, req *tfprotov
 	if !providerConfig["client_certificate"].IsNull() && providerConfig["client_certificate"].IsKnown() {
 		err = providerConfig["client_certificate"].As(&clientCertificate)
 		if err != nil {
-			diags = append(diags, &tfprotov5.Diagnostic{
+			response.Diagnostics = append(diags, &tfprotov5.Diagnostic{
 				Severity: tfprotov5.DiagnosticSeverityInvalid,
 				Summary:  "Invalid attribute in provider configuration",
 				Detail:   "'client_certificate' type cannot be asserted: " + err.Error(),
@@ -341,7 +341,7 @@ func (s *RawProviderServer) ConfigureProvider(ctx context.Context, req *tfprotov
 	}
 	hostURL, _, err := rest.DefaultServerURL(host, "", apimachineryschema.GroupVersion{}, defaultTLS)
 	if err != nil {
-		diags = append(diags, &tfprotov5.Diagnostic{
+		response.Diagnostics = append(diags, &tfprotov5.Diagnostic{
 			Severity: tfprotov5.DiagnosticSeverityInvalid,
 			Summary:  "Invalid attribute in provider configuration",
 			Detail:   "Invalid value for 'host': " + err.Error(),
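This one is likely more than a style fix: the diagnostics were appended to the local diags slice but apparently never attached to the response, so these configuration errors were dropped (staticcheck flags append results that are never used). The fix stores the appended slice in response.Diagnostics. A minimal illustration of the bug class, with hypothetical names:

var resp struct{ Diagnostics []string } // stand-in for the protocol response
diags := []string{}

_ = append(diags, "invalid host")                // buggy form: result discarded, caller never sees it
resp.Diagnostics = append(diags, "invalid host") // fixed form: attached to the returned response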
@@ -166,7 +166,7 @@ func (s *RawProviderServer) ReadPluralDataSource(ctx context.Context, req *tfpro
 		}
 		d := tfprotov5.Diagnostic{
 			Severity: tfprotov5.DiagnosticSeverityError,
-			Summary:  fmt.Sprintf("Failed to get data source"),
+			Summary:  "Failed to get data source",
 			Detail:   err.Error(),
 		}
 		resp.Diagnostics = append(resp.Diagnostics, &d)
@@ -357,7 +357,7 @@ func (s *RawProviderServer) ReadSingularDataSource(ctx context.Context, req *tfp
 		}
 		d := tfprotov5.Diagnostic{
 			Severity: tfprotov5.DiagnosticSeverityError,
-			Summary:  fmt.Sprintf("Failed to get data source"),
+			Summary:  "Failed to get data source",
 			Detail:   err.Error(),
 		}
 		resp.Diagnostics = append(resp.Diagnostics, &d)
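fmt.Sprintf with a constant format string and no arguments performs no formatting at all (gosimple S1039), so the string literal is used directly. In isolation:

a := fmt.Sprintf("Failed to get data source") // S1039: nothing to format
b := "Failed to get data source"              // same value, no function call
_ = a == b                                    // always true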
@@ -103,7 +103,7 @@ func (s *RawProviderServer) getFieldManagerConfig(v map[string]tftypes.Value) (s
 }

 func isImportedFlagFromPrivate(p []byte) (f bool, d []*tfprotov5.Diagnostic) {
-	if p == nil || len(p) == 0 {
+	if len(p) == 0 {
 		return
 	}
 	ps, err := getPrivateStateValue(p)
@@ -501,15 +501,3 @@ func (s *RawProviderServer) PlanResourceChange(ctx context.Context, req *tfproto
 	resp.PlannedState = &plannedState
 	return resp, nil
 }
-
-func getAttributeValue(v tftypes.Value, path string) (tftypes.Value, error) {
-	p, err := FieldPathToTftypesPath(path)
-	if err != nil {
-		return tftypes.Value{}, err
-	}
-	vv, _, err := tftypes.WalkAttributePath(v, p)
-	if err != nil {
-		return tftypes.Value{}, err
-	}
-	return vv.(tftypes.Value), nil
-}
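Two separate cleanups in this file: len(p) == 0 already covers p == nil, since len of a nil slice is defined as zero (gosimple S1009), and the now-unused getAttributeValue helper is deleted outright. The nil-check simplification in isolation:

var p []byte                         // nil slice
fmt.Println(len(p) == 0)             // true: len(nil) == 0 by definition
fmt.Println(p == nil || len(p) == 0) // same result; the first test is redundant (S1009)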
@@ -5,7 +5,6 @@ package provider

 import (
 	"context"
-	"encoding/json"
 	"fmt"
 	"os"
 	"testing"
@@ -82,19 +81,6 @@ func convertReattachConfig(reattachConfig *plugin.ReattachConfig) tfexec.Reattac
 	}
 }

-// printReattachConfig prints the line the user needs to copy and paste
-// to set the TF_REATTACH_PROVIDERS variable
-func printReattachConfig(config *plugin.ReattachConfig) {
-	reattachStr, err := json.Marshal(map[string]tfexec.ReattachConfig{
-		providerName: convertReattachConfig(config),
-	})
-	if err != nil {
-		fmt.Printf("Error building reattach string: %s", err)
-		return
-	}
-	fmt.Printf("# Provider server started\nexport TF_REATTACH_PROVIDERS='%s'\n", string(reattachStr))
-}
-
 // waitForReattachConfig blocks until a ReattachConfig is recieved on the
 // supplied channel or times out after 2 seconds.
 func waitForReattachConfig(ch chan *plugin.ReattachConfig) (*plugin.ReattachConfig, error) {
@@ -277,6 +277,9 @@ func getPrivateStateValue(p []byte) (ps map[string]tftypes.Value, err error) {
 		return
 	}
 	pv, err := tftypes.ValueFromMsgPack(p, privateStateSchema)
+	if err != nil {
+		return
+	}
 	err = pv.As(&ps)
 	return
 }
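The added guard matters: previously the error from tftypes.ValueFromMsgPack was assigned and then immediately overwritten by the pv.As(&ps) assignment, so a MessagePack decode failure could never reach the caller (the pattern staticcheck reports as a value that is never read). The shape of the bug, with hypothetical decode/validate helpers:

v, err := decode(input) // err assigned here...
err = validate(v)       // ...and overwritten here: decode failures vanish
return v, err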
@@ -248,9 +248,9 @@ func FieldPathToTftypesPath(fieldPath string) (*tftypes.AttributePath, error) {

 	path := tftypes.NewAttributePath()
 	for _, p := range t {
-		switch p.(type) {
+		switch t := p.(type) {
 		case hcl.TraverseRoot:
-			path = path.WithAttributeName(p.(hcl.TraverseRoot).Name)
+			path = path.WithAttributeName(t.Name)
 		case hcl.TraverseIndex:
 			indexKey := p.(hcl.TraverseIndex).Key
 			indexKeyType := indexKey.Type()
@@ -268,7 +268,7 @@ func FieldPathToTftypesPath(fieldPath string) (*tftypes.AttributePath, error) {
 				return tftypes.NewAttributePath(), fmt.Errorf("unsupported type in field path: %s", indexKeyType.FriendlyName())
 			}
 		case hcl.TraverseAttr:
-			path = path.WithAttributeName(p.(hcl.TraverseAttr).Name)
+			path = path.WithAttributeName(t.Name)
 		case hcl.TraverseSplat:
 			return tftypes.NewAttributePath(), fmt.Errorf("splat is not supported")
 		}
@@ -6,7 +6,6 @@ package plugintest
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"strings"

@@ -39,7 +38,7 @@ func DiscoverConfig(ctx context.Context, sourceDir string) (*Config, error) {
 	tfPath := os.Getenv(EnvTfAccTerraformPath)

 	tempDir := os.Getenv(EnvTfAccTempDir)
-	tfDir, err := ioutil.TempDir(tempDir, "plugintest-terraform")
+	tfDir, err := os.MkdirTemp(tempDir, "plugintest-terraform")
 	if err != nil {
 		return nil, fmt.Errorf("failed to create temp dir: %w", err)
 	}
@@ -7,7 +7,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"strings"

@@ -73,7 +72,7 @@ func AutoInitHelper(ctx context.Context, sourceDir string) (*Helper, error) {
 // automatically clean those up.
 func InitHelper(ctx context.Context, config *Config) (*Helper, error) {
 	tempDir := os.Getenv(EnvTfAccTempDir)
-	baseDir, err := ioutil.TempDir(tempDir, "plugintest")
+	baseDir, err := os.MkdirTemp(tempDir, "plugintest")
 	if err != nil {
 		return nil, fmt.Errorf("failed to create temporary directory for test helper: %s", err)
 	}
@@ -108,7 +107,7 @@ func (h *Helper) Close() error {
 // program exits, the Close method on the helper itself will attempt to
 // delete it.
 func (h *Helper) NewWorkingDir(ctx context.Context, t TestControl) (*WorkingDir, error) {
-	dir, err := ioutil.TempDir(h.baseDir, "work")
+	dir, err := os.MkdirTemp(h.baseDir, "work")
 	if err != nil {
 		return nil, err
 	}
@@ -8,7 +8,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"os"
 	"path/filepath"

@@ -37,9 +37,6 @@ type WorkingDir struct {
 	// was stored; empty until SetConfig is called.
 	configFilename string

-	// baseArgs is arguments that should be appended to all commands
-	baseArgs []string
-
 	// tf is the instance of tfexec.Terraform used for running Terraform commands
 	tf *tfexec.Terraform

@@ -87,7 +84,7 @@ func (wd *WorkingDir) SetConfig(ctx context.Context, cfg string) error {
 	if err := os.Remove(rmFilename); err != nil && !os.IsNotExist(err) {
 		return fmt.Errorf("unable to remove %q: %w", rmFilename, err)
 	}
-	err := ioutil.WriteFile(outFilename, bCfg, 0700)
+	err := os.WriteFile(outFilename, bCfg, 0700)
 	if err != nil {
 		return err
 	}
@@ -266,7 +263,7 @@ func (wd *WorkingDir) SavedPlanRawStdout(ctx context.Context) (string, error) {
 	var ret bytes.Buffer

 	wd.tf.SetStdout(&ret)
-	defer wd.tf.SetStdout(ioutil.Discard)
+	defer wd.tf.SetStdout(io.Discard)

 	logging.HelperResourceTrace(ctx, "Calling Terraform CLI show command")
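These vendored plugintest files pick up the standard io/ioutil deprecation (deprecated since Go 1.16): every helper used here has a drop-in replacement in os or io. A compilable sketch of the mappings applied above; the paths are illustrative:

package main

import (
	"io"
	"os"
)

func main() {
	dir, err := os.MkdirTemp("", "plugintest") // was ioutil.TempDir
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	cfg := []byte("terraform {}\n")
	if err := os.WriteFile(dir+"/main.tf", cfg, 0700); err != nil { // was ioutil.WriteFile
		panic(err)
	}
	data, err := os.ReadFile(dir + "/main.tf") // was ioutil.ReadFile
	if err != nil {
		panic(err)
	}
	_, _ = io.Discard.Write(data) // io.Discard was ioutil.Discard
}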