mirror of https://github.com/hashicorp/terraform-provider-kubernetes.git
synced 2025-12-18 23:06:07 -05:00

Clean up test-infra, workflow and tools (#2313)

This commit is contained in:
  parent 4dff7d1fd2
  commit 3655fa7e50

39 changed files with 141 additions and 1022 deletions
.github/workflows/acceptance_tests_aks.yaml (vendored): 8 changes

@@ -28,7 +28,7 @@ on:
     - cron: '0 22 * * *'

 env:
-  KUBE_CONFIG_PATH: ${{ github.workspace }}/kubernetes/test-infra/aks-new/kubeconfig
+  KUBE_CONFIG_PATH: ${{ github.workspace }}/kubernetes/test-infra/aks/kubeconfig
   TERRAFORM_VERSION: ${{ github.event.inputs.terraformVersion || vars.TERRAFORM_VERSION }}
   PARALLEL_RUNS: ${{ github.event.inputs.parallelRuns || vars.PARALLEL_RUNS }}
   TF_VAR_location: ${{ github.event.inputs.location || vars.AZURE_LOCATION }}
@@ -37,7 +37,7 @@ env:
   TF_VAR_cluster_version: ${{ github.event.inputs.clusterVersion || vars.CLUSTER_VERSION}}

 jobs:
-  acceptanceTests:
+  acceptance_tests_aks:
     runs-on: [custom, linux, medium]
     steps:
       - name: Checkout repository
@@ -56,7 +56,7 @@ jobs:
         with:
           creds: ${{ secrets.AZURE_CREDENTIALS }}
       - name: Provision AKS
-        working-directory: ${{ github.workspace }}/kubernetes/test-infra/aks-new
+        working-directory: ${{ github.workspace }}/kubernetes/test-infra/aks
        run: |
          terraform init
          terraform apply -auto-approve
@@ -68,6 +68,6 @@ jobs:
          make testacc
      - name: Destroy AKS
        if: always()
-        working-directory: ${{ github.workspace }}/kubernetes/test-infra/aks-new
+        working-directory: ${{ github.workspace }}/kubernetes/test-infra/aks
        run: |
          terraform destroy -auto-approve
.github/workflows/acceptance_tests_eks.yaml (vendored): 2 changes

@@ -41,7 +41,7 @@ env:
   TF_VAR_instance_type: ${{ github.event.inputs.instanceType || vars.AWS_INSTANCE_TYPE }}

 jobs:
-  acceptance_tests:
+  acceptance_tests_eks:
     runs-on: [custom, linux, medium]
     steps:
       - name: Checkout repository
.github/workflows/acceptance_tests_gke.yaml (vendored): 2 changes

@@ -43,7 +43,7 @@ env:
   TF_VAR_instance_type: ${{ github.event.inputs.instanceType || vars.GOOGLE_INSTANCE_TYPE }}

 jobs:
-  acceptance_tests:
+  acceptance_tests_gke:
     runs-on: [custom, linux, medium]
     steps:
       - name: Checkout repository
.github/workflows/acceptance_tests_kind.yaml (vendored): 4 changes

@@ -11,7 +11,7 @@ on:
         default: ".*"
       terraformVersion:
         description: Terraform version
-        default: 1.5.3
+        default: 1.5.6
       parallelRuns:
         description: The maximum number of tests to run simultaneously
         default: 8
@@ -31,7 +31,7 @@ env:
   TERRAFORM_VERSION: ${{ github.event.inputs.terraformVersion || vars.TERRAFORM_VERSION }}

 jobs:
-  acceptance_tests:
+  acceptance_tests_kind:
     runs-on: [custom, linux, medium]
     steps:
       - name: Checkout repository
@@ -78,7 +78,6 @@ test: fmtcheck
     go test $(TEST) || exit 1
     echo $(TEST) | \
         xargs -t -n4 go test $(TESTARGS) -timeout=30s -parallel=4
-    go test ./tools

 testacc: fmtcheck vet
     TF_ACC=1 go test $(TEST) -v -vet=off $(TESTARGS) -parallel $(PARALLEL_RUNS) -timeout 3h
kubernetes/test-infra/aks-new/.gitignore (vendored): 5 changes

@@ -1,5 +0,0 @@
-.terraform
-kubeconfig
-terraform.tfstate
-.terraform.lock.hcl
-terraform.tfstate.backup
@@ -1,46 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-provider "azurerm" {
-  features {}
-}
-
-resource "random_pet" "name" {}
-
-resource "azurerm_resource_group" "test_group" {
-  name     = "test-aks-${random_pet.name.id}"
-  location = var.location
-}
-
-resource "azurerm_kubernetes_cluster" "test" {
-  name                = "test-aks-${random_pet.name.id}"
-  location            = azurerm_resource_group.test_group.location
-  resource_group_name = azurerm_resource_group.test_group.name
-  dns_prefix          = "test"
-  kubernetes_version  = var.cluster_version
-
-  default_node_pool {
-    name       = "default"
-    node_count = var.node_count
-    vm_size    = var.vm_size
-  }
-
-  identity {
-    type = "SystemAssigned"
-  }
-}
-
-resource "local_file" "kubeconfig" {
-  content  = azurerm_kubernetes_cluster.test.kube_config_raw
-  filename = "${path.module}/kubeconfig"
-}
-
-output "kubeconfig" {
-  value     = azurerm_kubernetes_cluster.test.kube_config_raw
-  sensitive = true
-}
-
-output "cluster_name" {
-  value = "test-aks-${random_pet.name.id}"
-}
-
@@ -1,22 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-variable "location" {
-  type    = string
-  default = "West Europe"
-}
-
-variable "node_count" {
-  type    = number
-  default = 2
-}
-
-variable "vm_size" {
-  type    = string
-  default = "Standard_A4_v2"
-}
-
-variable "cluster_version" {
-  type    = string
-  default = "1.27"
-}
@@ -1,98 +1,36 @@
-# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
-
-locals {
-  random_prefix = "${var.prefix}-${random_id.tf-k8s-acc.hex}"
-}
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0

 provider "azurerm" {
   features {}
 }

-data "azurerm_kubernetes_service_versions" "current" {
-  location       = var.location
-  version_prefix = var.kubernetes_version
-}
+resource "random_pet" "name" {}

-resource "random_id" "tf-k8s-acc" {
-  byte_length = 3
-}
-
-resource "azurerm_resource_group" "tf-k8s-acc" {
-  name     = "${local.random_prefix}-rsg"
+resource "azurerm_resource_group" "test_group" {
+  name     = "test-aks-${random_pet.name.id}"
   location = var.location
 }

-resource "azurerm_route_table" "tf-k8s-acc" {
-  name                = "${local.random_prefix}-rt"
-  location            = azurerm_resource_group.tf-k8s-acc.location
-  resource_group_name = azurerm_resource_group.tf-k8s-acc.name
-
-  route {
-    name                   = "default"
-    address_prefix         = "10.100.0.0/14"
-    next_hop_type          = "VirtualAppliance"
-    next_hop_in_ip_address = "10.10.1.1"
-  }
-}
-
-resource "azurerm_virtual_network" "tf-k8s-acc" {
-  name                = "${local.random_prefix}-network"
-  location            = azurerm_resource_group.tf-k8s-acc.location
-  resource_group_name = azurerm_resource_group.tf-k8s-acc.name
-  address_space       = ["10.1.0.0/16"]
-}
-
-resource "azurerm_subnet" "tf-k8s-acc" {
-  name                 = "${local.random_prefix}-internal"
-  resource_group_name  = azurerm_resource_group.tf-k8s-acc.name
-  address_prefixes     = ["10.1.0.0/24"]
-  virtual_network_name = azurerm_virtual_network.tf-k8s-acc.name
-}
-
-resource "azurerm_subnet_route_table_association" "tf-k8s-acc" {
-  subnet_id      = azurerm_subnet.tf-k8s-acc.id
-  route_table_id = azurerm_route_table.tf-k8s-acc.id
-}
-
-resource "azurerm_kubernetes_cluster" "tf-k8s-acc" {
-  name                = "${local.random_prefix}-cluster"
-  resource_group_name = azurerm_resource_group.tf-k8s-acc.name
-  location            = azurerm_resource_group.tf-k8s-acc.location
-  dns_prefix          = "${local.random_prefix}-cluster"
-  kubernetes_version  = data.azurerm_kubernetes_service_versions.current.latest_version
-
-  # Uncomment to enable SSH access to nodes
-  #
-  # linux_profile {
-  #   admin_username = "acctestuser1"
-  #   ssh_key {
-  #     key_data = "${file(var.public_ssh_key_path)}"
-  #   }
-  # }
+resource "azurerm_kubernetes_cluster" "test" {
+  name                = "test-aks-${random_pet.name.id}"
+  location            = azurerm_resource_group.test_group.location
+  resource_group_name = azurerm_resource_group.test_group.name
+  dns_prefix          = "test"
+  kubernetes_version  = var.cluster_version

   default_node_pool {
-    name            = "agentpool"
-    node_count      = var.workers_count
-    vm_size         = var.workers_type
-    os_disk_size_gb = 30
-
-    # Required for advanced networking
-    vnet_subnet_id = azurerm_subnet.tf-k8s-acc.id
+    name       = "default"
+    node_count = var.node_count
+    vm_size    = var.vm_size
   }

   identity {
     type = "SystemAssigned"
   }
-
-  network_profile {
-    network_plugin = "azure"
-  }
 }

 resource "local_file" "kubeconfig" {
-  content  = azurerm_kubernetes_cluster.tf-k8s-acc.kube_config_raw
+  content  = azurerm_kubernetes_cluster.test.kube_config_raw
   filename = "${path.module}/kubeconfig"
 }
-
-
@@ -1,10 +1,11 @@
 # Copyright (c) HashiCorp, Inc.
 # SPDX-License-Identifier: MPL-2.0

-output "kubeconfig_path" {
-  value = local_file.kubeconfig.filename
+output "kubeconfig" {
+  value     = azurerm_kubernetes_cluster.test.kube_config_raw
+  sensitive = true
 }

 output "cluster_name" {
-  value = azurerm_kubernetes_cluster.tf-k8s-acc.name
+  value = "test-aks-${random_pet.name.id}"
 }
kubernetes/test-infra/aks/terraform.tf (new file): 14 changes

@@ -0,0 +1,14 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+
+terraform {
+  required_version = ">= 1.0"
+
+  required_providers {
+    azurerm = {
+      source  = "hashicorp/azurerm"
+      version = ">= 3.0.0, < 4.0.0"
+    }
+  }
+}
@@ -1,33 +1,22 @@
-# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
-
-variable "prefix" {
-  description = "A prefix used for all resources in this example"
-  default     = "tf-k8s-acc"
-}
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0

 variable "location" {
-  default     = "West Europe"
-  description = "The Azure Region in which all resources in this example should be provisioned"
-}
-
-variable "kubernetes_version" {
-  type = string
-}
-
-variable "workers_count" {
+  type    = string
+  default = "West Europe"
+}
+
+variable "node_count" {
   type    = number
   default = 2
 }

-variable "workers_type" {
+variable "vm_size" {
   type    = string
-  default = "Standard_DS4_v2"
+  default = "Standard_A4_v2"
 }

-# Uncomment to enable SSH access to nodes
-#
-# variable "public_ssh_key_path" {
-#   description = "The Path at which your Public SSH Key is located. Defaults to ~/.ssh/id_rsa.pub"
-#   default = "~/.ssh/id_rsa.pub"
-#}
+variable "cluster_version" {
+  type    = string
+  default = "1.27"
+}
@@ -1,7 +0,0 @@
-# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
-
-
-terraform {
-  required_version = ">= 0.12"
-}
kubernetes/test-infra/aws-ebs-csi-driver/versions.tf (new file): 14 changes

@@ -0,0 +1,14 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+
+terraform {
+  required_version = ">= 1.0"
+
+  required_providers {
+    google = {
+      source  = "hashicorp/aws"
+      version = ">= 5.0.0, < 6.0.0"
+    }
+  }
+}
@@ -1,67 +0,0 @@
-# EKS (Amazon Elastic Kubernetes Service)
-
-This example demonstrates the most reliable way to use the Kubernetes provider together with the AWS provider to create an EKS cluster. By keeping the two providers' resources in separate Terraform states (or separate workspaces using [Terraform Cloud](https://app.terraform.io/)), we can limit the scope of impact to apply the right changes to the right place. (For example, updating the underlying EKS infrastructure without having to navigate the Kubernetes provider configuration challenges caused by modifying EKS cluster attributes in a single apply).
-
-You will need the following environment variables to be set:
-
-- `AWS_ACCESS_KEY_ID`
-- `AWS_SECRET_ACCESS_KEY`
-
-See [AWS Provider docs](https://www.terraform.io/docs/providers/aws/index.html#configuration-reference) for more details about these variables and alternatives, like `AWS_PROFILE`.
-
-
-## Create EKS cluster
-
-Choose a name for the cluster, or use the terraform config in the current directory to create a random name.
-
-```
-terraform init
-terraform apply --auto-approve
-export CLUSTERNAME=$(terraform output -raw cluster_name)
-```
-
-Change into the eks-cluster directory and create the EKS cluster infrastrcture.
-
-```
-cd eks-cluster
-terraform init
-terraform apply -var=cluster_name=$CLUSTERNAME
-cd -
-```
-
-Optionally, the Kubernetes version can be specified at apply time:
-
-```
-terraform apply -var=kubernetes_version=1.18
-```
-
-See https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html for currently available versions.
-
-
-## Create Kubernetes resources
-
-Change into the kubernetes-config directory to apply Kubernetes resources to the new cluster.
-
-```
-cd kubernetes-config
-terraform init
-terraform apply -var=cluster_name=$CLUSTERNAME
-```
-
-## Deleting the cluster
-
-First, delete the Kubernetes resources as shown below. This will give Ingress and Service related Load Balancers a chance to delete before the other AWS resources are removed.
-
-```
-cd kubernetes-config
-terraform destroy -var=cluster_name=$CLUSTERNAME
-cd -
-```
-
-Then delete the EKS related resources:
-
-```
-cd eks-cluster
-terraform destroy -var=cluster_name=$CLUSTERNAME
-cd -
-```
@@ -1,39 +0,0 @@
-# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
-
-resource "aws_eks_cluster" "k8s-acc" {
-  name     = var.cluster_name
-  role_arn = aws_iam_role.k8s-acc-cluster.arn
-
-  vpc_config {
-    subnet_ids = aws_subnet.k8s-acc.*.id
-  }
-
-  # Ensure that IAM Role permissions are created before and deleted after EKS Cluster handling.
-  # Otherwise, EKS will not be able to properly delete EKS managed EC2 infrastructure such as Security Groups.
-  depends_on = [
-    aws_iam_role_policy_attachment.k8s-acc-AmazonEKSClusterPolicy,
-    aws_iam_role_policy_attachment.k8s-acc-AmazonEKSVPCResourceController,
-  ]
-}
-
-resource "aws_eks_node_group" "k8s-acc" {
-  cluster_name    = aws_eks_cluster.k8s-acc.name
-  node_group_name = var.cluster_name
-  node_role_arn   = aws_iam_role.k8s-acc-node.arn
-  subnet_ids      = aws_subnet.k8s-acc.*.id
-
-  scaling_config {
-    desired_size = 1
-    max_size     = 1
-    min_size     = 1
-  }
-
-  # Ensure that IAM Role permissions are created before and deleted after EKS Node Group handling.
-  # Otherwise, EKS will not be able to properly delete EC2 Instances and Elastic Network Interfaces.
-  depends_on = [
-    aws_iam_role_policy_attachment.k8s-acc-AmazonEKSWorkerNodePolicy,
-    aws_iam_role_policy_attachment.k8s-acc-AmazonEKS_CNI_Policy,
-    aws_iam_role_policy_attachment.k8s-acc-AmazonEC2ContainerRegistryReadOnly,
-  ]
-}
@@ -1,63 +0,0 @@
-# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
-
-resource "aws_iam_role" "k8s-acc-cluster" {
-  name = var.cluster_name
-
-  assume_role_policy = <<POLICY
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Principal": {
-        "Service": "eks.amazonaws.com"
-      },
-      "Action": "sts:AssumeRole"
-    }
-  ]
-}
-POLICY
-}
-
-resource "aws_iam_role_policy_attachment" "k8s-acc-AmazonEKSClusterPolicy" {
-  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
-  role       = aws_iam_role.k8s-acc-cluster.name
-}
-
-# Optionally, enable Security Groups for Pods
-# Reference: https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html
-resource "aws_iam_role_policy_attachment" "k8s-acc-AmazonEKSVPCResourceController" {
-  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController"
-  role       = aws_iam_role.k8s-acc-cluster.name
-}
-
-resource "aws_iam_role" "k8s-acc-node" {
-  name = "${var.cluster_name}-node"
-
-  assume_role_policy = jsonencode({
-    Statement = [{
-      Action = "sts:AssumeRole"
-      Effect = "Allow"
-      Principal = {
-        Service = "ec2.amazonaws.com"
-      }
-    }]
-    Version = "2012-10-17"
-  })
-}
-
-resource "aws_iam_role_policy_attachment" "k8s-acc-AmazonEKSWorkerNodePolicy" {
-  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
-  role       = aws_iam_role.k8s-acc-node.name
-}
-
-resource "aws_iam_role_policy_attachment" "k8s-acc-AmazonEKS_CNI_Policy" {
-  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
-  role       = aws_iam_role.k8s-acc-node.name
-}
-
-resource "aws_iam_role_policy_attachment" "k8s-acc-AmazonEC2ContainerRegistryReadOnly" {
-  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
-  role       = aws_iam_role.k8s-acc-node.name
-}
@@ -1,6 +0,0 @@
-# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
-
-variable "cluster_name" {
-  type = string
-}
@@ -1,59 +0,0 @@
-# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
-
-data "aws_region" "current" {
-}
-
-data "aws_availability_zones" "available" {
-}
-
-resource "aws_vpc" "k8s-acc" {
-  cidr_block           = "10.0.0.0/16"
-  enable_dns_support   = true
-  enable_dns_hostnames = true
-  tags = {
-    "Name"                                      = "terraform-eks-k8s-acc-node"
-    "kubernetes.io/cluster/${var.cluster_name}" = "shared"
-  }
-}
-
-resource "aws_subnet" "k8s-acc" {
-  count = 2
-
-  availability_zone       = data.aws_availability_zones.available.names[count.index]
-  cidr_block              = "10.0.${count.index}.0/24"
-  vpc_id                  = aws_vpc.k8s-acc.id
-  map_public_ip_on_launch = true
-
-  tags = {
-    "Name"                                      = "terraform-eks-k8s-acc-node"
-    "kubernetes.io/cluster/${var.cluster_name}" = "shared"
-    "kubernetes.io/role/elb"                    = 1
-  }
-}
-
-resource "aws_internet_gateway" "k8s-acc" {
-  vpc_id = aws_vpc.k8s-acc.id
-
-  tags = {
-    Name = "terraform-eks-k8s-acc"
-  }
-}
-
-resource "aws_route_table" "k8s-acc" {
-  vpc_id = aws_vpc.k8s-acc.id
-
-  route {
-    cidr_block = "0.0.0.0/0"
-    gateway_id = aws_internet_gateway.k8s-acc.id
-  }
-}
-
-resource "aws_route_table_association" "k8s-acc" {
-  count = 2
-
-  subnet_id      = aws_subnet.k8s-acc[count.index].id
-  route_table_id = aws_route_table.k8s-acc.id
-}
@@ -1,28 +0,0 @@
-apiVersion: v1
-preferences: {}
-kind: Config
-
-clusters:
-- cluster:
-    server: ${endpoint}
-    certificate-authority-data: ${clusterca}
-  name: ${cluster_name}
-
-contexts:
-- context:
-    cluster: ${cluster_name}
-    user: ${cluster_name}
-  name: ${cluster_name}
-
-current-context: ${cluster_name}
-
-users:
-- name: ${cluster_name}
-  user:
-    exec:
-      apiVersion: client.authentication.k8s.io/v1alpha1
-      command: aws-iam-authenticator
-      args:
-        - token
-        - --cluster-id
-        - ${cluster_name}
@@ -1,51 +0,0 @@
-# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
-
-data "aws_eks_cluster" "default" {
-  name = var.cluster_name
-}
-
-data "aws_eks_cluster_auth" "default" {
-  name = var.cluster_name
-}
-
-provider "kubernetes" {
-  host                   = data.aws_eks_cluster.default.endpoint
-  cluster_ca_certificate = base64decode(data.aws_eks_cluster.default.certificate_authority[0].data)
-  token                  = data.aws_eks_cluster_auth.default.token
-}
-
-provider "helm" {
-  kubernetes {
-    host                   = data.aws_eks_cluster.default.endpoint
-    cluster_ca_certificate = base64decode(data.aws_eks_cluster.default.certificate_authority[0].data)
-    token                  = data.aws_eks_cluster_auth.default.token
-  }
-}
-
-resource "local_file" "kubeconfig" {
-  sensitive_content = templatefile("${path.module}/kubeconfig.tpl", {
-    cluster_name = var.cluster_name,
-    clusterca    = data.aws_eks_cluster.default.certificate_authority[0].data,
-    endpoint     = data.aws_eks_cluster.default.endpoint,
-  })
-  filename = "./kubeconfig-${var.cluster_name}"
-}
-
-resource "kubernetes_namespace" "test" {
-  metadata {
-    name = "test"
-  }
-}
-
-resource "helm_release" "nginx_ingress" {
-  namespace = kubernetes_namespace.test.metadata.0.name
-  wait      = true
-  timeout   = 600
-
-  name = "ingress-nginx"
-
-  repository = "https://kubernetes.github.io/ingress-nginx"
-  chart      = "ingress-nginx"
-  version    = "v3.30.0"
-}
@@ -1,6 +0,0 @@
-# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
-
-output "kubeconfig" {
-  value = abspath("${path.root}/${local_file.kubeconfig.filename}")
-}
@@ -1,6 +0,0 @@
-# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
-
-variable "cluster_name" {
-  type = string
-}
@@ -1,15 +0,0 @@
-# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
-
-terraform {
-  required_providers {
-    kubernetes = {
-      source  = "hashicorp/kubernetes"
-      version = "~> 2.1.0"
-    }
-    aws = {
-      source  = "hashicorp/aws"
-      version = "~> 3.39.0"
-    }
-  }
-}
@@ -1,6 +0,0 @@
-# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
-
-output "cluster_name" {
-  value = local.cluster_name
-}
@@ -1,11 +0,0 @@
-# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
-
-resource "random_id" "cluster_name" {
-  byte_length = 2
-  prefix      = "k8s-acc-"
-}
-
-locals {
-  cluster_name = random_id.cluster_name.hex
-}
@@ -1,13 +1,14 @@
 # Copyright (c) HashiCorp, Inc.
 # SPDX-License-Identifier: MPL-2.0


 terraform {
+  required_version = ">= 1.0"
+
   required_providers {
     aws = {
-      source = "hashicorp/aws"
-      version = "3.38.0"
+      source  = "hashicorp/aws"
+      version = ">= 5.0.0, < 6.0.0"
     }
   }
 }

@@ -1,26 +1,6 @@
 # Copyright (c) HashiCorp, Inc.
 # SPDX-License-Identifier: MPL-2.0

-variable "cluster_version" {
-  default = ""
-}
-
-variable "node_count" {
-  default = "1"
-}
-
-variable "instance_type" {
-  default = "e2-standard-2"
-}
-
-variable "enable_alpha" {
-  default = false
-}
-
-variable "cluster_name" {
-  default = ""
-}
-
 data "google_compute_zones" "available" {
 }

@@ -127,19 +107,3 @@ resource "local_file" "kubeconfig" {
   content  = yamlencode(local.kubeconfig)
   filename = "${path.module}/kubeconfig"
 }
-
-output "google_zone" {
-  value = data.google_compute_zones.available.names[0]
-}
-
-output "node_version" {
-  value = google_container_cluster.primary.node_version
-}
-
-output "kubeconfig_path" {
-  value = local_file.kubeconfig.filename
-}
-
-output "cluster_name" {
-  value = google_container_cluster.primary.name
-}
kubernetes/test-infra/gke/outputs.tf (new file): 18 changes

@@ -0,0 +1,18 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+output "google_zone" {
+  value = data.google_compute_zones.available.names[0]
+}
+
+output "node_version" {
+  value = google_container_cluster.primary.node_version
+}
+
+output "kubeconfig_path" {
+  value = local_file.kubeconfig.filename
+}
+
+output "cluster_name" {
+  value = google_container_cluster.primary.name
+}
kubernetes/test-infra/gke/variables.tf (new file): 22 changes

@@ -0,0 +1,22 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: MPL-2.0
+
+variable "cluster_version" {
+  default = ""
+}
+
+variable "node_count" {
+  default = "1"
+}
+
+variable "instance_type" {
+  default = "e2-standard-2"
+}
+
+variable "enable_alpha" {
+  default = false
+}
+
+variable "cluster_name" {
+  default = ""
+}
@@ -1,68 +0,0 @@
-# LKE (Linode Kubernetes Engine)
-
-You will need to have the `LINODE_API_TOKEN` environment variable set to authenticate with the API. See the [Linode Terraform Provider docs](https://www.terraform.io/docs/providers/linode/index.html) for more information.
-
-## Versions
-
-Determine the supported Kubernetes versions via the Linode CLI.
-
-```
-linode-cli lke versions-list
-```
-
-Additionally, you can use the following API endpoint.
-
-```sh
-curl https://api.linode.com/v4/lke/versions
-```
-
-## Linode worker node types
-
-Determine the supported Linode instance types to spin up as worker nodes via the following API endpoint.
-
-```sh
-curl https://api.linode.com/v4/linode/types
-```
-
-## Variables
-
-The following variables can be set via their Environment variable bindings.
-
-- `TF_VAR_kubernetes_version`
-- `TF_VAR_workers_count` - amount of Linodes to spin up for cluster.
-- `TF_VAR_workers_type` - type of Linodes to spin up for cluster.
-
-Export values for them or pass them to the apply command line.
-
-## Build the cluster
-
-```
-terraform init
-LINODE_API_TOKEN="XXXXXXXXXXXXXXXX" \
-TF_VAR_kubernetes_version=1.17 \
-TF_VAR_workers_count=3 \
-TF_VAR_workers_type=g6-standard-2 \
-terraform apply --auto-approve
-```
-
-## Acceptance test usage
-
-The path to the resulting kubeconfig file to access the provisioned cluster will be provided under the output name `kubeconfig_path`.
-
-```sh
-export KUBECONFIG="$(terraform output kubeconfig_path)"
-```
-
-Now you can access the cluster via `kubectl` and you can run acceptance tests against it.
-
-To run acceptance tests, your the following command in the root of the repository.
-
-```sh
-TESTARGS="-run '^TestAcc'" make testacc
-```
-
-To run only a specific set of tests, you can replace ^TestAcc with any regular expression to filter tests by name. For example, to run tests for Pod resources, you can do:
-
-```sh
-TESTARGS="-run '^TestAccKubernetesPod_'" make testacc
-```
@@ -1,50 +0,0 @@
-# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
-
-provider "linode" {
-  // Provider settings to be provided via ENV variables
-}
-
-locals {
-  namePrefix = "tf-acc-test"
-}
-
-resource "random_id" "cluster_label" {
-  byte_length = 10
-}
-
-variable "kubernetes_version" {
-  type    = string
-  default = "1.17"
-}
-
-variable "workers_type" {
-  type    = string
-  default = "g6-standard-2"
-}
-
-variable "workers_count" {
-  type    = number
-  default = 3
-}
-
-resource "linode_lke_cluster" "cluster" {
-  label       = "${local.namePrefix}-${random_id.cluster_label.id}"
-  region      = "us-east"
-  k8s_version = var.kubernetes_version
-  tags        = ["acc-test"]
-
-  pool {
-    type  = var.workers_type
-    count = var.workers_count
-  }
-}
-
-resource "local_file" "kubeconfig" {
-  content  = base64decode(linode_lke_cluster.cluster.kubeconfig)
-  filename = "kubeconfig"
-}
-
-output "kubeconfig_path" {
-  value = "${path.cwd}/${local_file.kubeconfig.filename}"
-}
@@ -10,22 +10,22 @@ resource "tls_private_key" "typhoon-acc" {
 }

 resource "local_file" "public_key_openssh" {
-  content = tls_private_key.typhoon-acc.public_key_openssh
-  filename = "${path.cwd}/${var.cluster_name}.pub"
+  content  = tls_private_key.typhoon-acc.public_key_openssh
+  filename = "${path.cwd}/${var.cluster_name}.pub"
 }

 resource "local_file" "private_key_pem" {
-  content = tls_private_key.typhoon-acc.private_key_pem
-  filename = "${path.cwd}/${var.cluster_name}"
+  content  = tls_private_key.typhoon-acc.private_key_pem
+  filename = "${path.cwd}/${var.cluster_name}"
 }

 resource "null_resource" "ssh-key" {
   provisioner "local-exec" {
-    command = format("chmod 600 %v", local_file.private_key_pem.filename)
+    command     = format("chmod 600 %v", local_file.private_key_pem.filename)
     working_dir = path.cwd
   }
   provisioner "local-exec" {
-    command = format("ssh-add %v", local_file.private_key_pem.filename)
+    command     = format("ssh-add %v", local_file.private_key_pem.filename)
     working_dir = path.cwd
   }
 }
@@ -2,17 +2,17 @@
 # SPDX-License-Identifier: MPL-2.0

 locals {
-  # This local gets a value of 1 when the 'kubernetes_version' input variable requests a 1.19.x version, otherwise it is 0.
-  # It's used to enable the module and resources specific to 1.19.x as a workaround for not being able
-  # to interpolate variables in the 'source' attribute of a module block.
-  #
-  enabled_1_19 = length(regexall("v?1.19.?[0-9]{0,2}", var.kubernetes_version))
+  # This local gets a value of 1 when the 'kubernetes_version' input variable requests a 1.19.x version, otherwise it is 0.
+  # It's used to enable the module and resources specific to 1.19.x as a workaround for not being able
+  # to interpolate variables in the 'source' attribute of a module block.
+  #
+  enabled_1_19 = length(regexall("v?1.19.?[0-9]{0,2}", var.kubernetes_version))
 }

 # This module builds a 1.19.x Typhoon cluster. It is mutually exlusive to other modules of different versions.
 #
 module "typhoon-acc-1_19" {
-  count = local.enabled_1_19
+  count  = local.enabled_1_19
   source = "git::https://github.com/poseidon/typhoon//aws/flatcar-linux/kubernetes?ref=v1.19.4"

   cluster_name = var.cluster_name
@@ -22,14 +22,14 @@ module "typhoon-acc-1_19" {
   # node configuration
   ssh_authorized_key = tls_private_key.typhoon-acc.public_key_openssh

-  worker_count = var.worker_count
+  worker_count     = var.worker_count
   controller_count = var.controller_count
-  worker_type = var.controller_type
-  controller_type = var.worker_type
+  worker_type      = var.controller_type
+  controller_type  = var.worker_type
 }

 resource "local_file" "typhoon-acc-1_19" {
-  count = local.enabled_1_19
+  count    = local.enabled_1_19
   content  = module.typhoon-acc-1_19[0].kubeconfig-admin
   filename = local.kubeconfig_path
 }
@@ -2,17 +2,17 @@
 # SPDX-License-Identifier: MPL-2.0

 locals {
-  # This local gets a value of 1 when the 'kubernetes_version' input variable requests a 1.20.x version, otherwise it is 0.
-  # It's used to enable the module and resources specific to 1.20.x as a workaround for not being able
-  # to interpolate variables in the 'source' attribute of a module block.
-  #
-  enabled_1_20 = length(regexall("v?1.20.?[0-9]{0,2}", var.kubernetes_version))
+  # This local gets a value of 1 when the 'kubernetes_version' input variable requests a 1.20.x version, otherwise it is 0.
+  # It's used to enable the module and resources specific to 1.20.x as a workaround for not being able
+  # to interpolate variables in the 'source' attribute of a module block.
+  #
+  enabled_1_20 = length(regexall("v?1.20.?[0-9]{0,2}", var.kubernetes_version))
 }

 # This module builds a 1.20.x Typhoon cluster. It is mutually exlusive to other modules of different versions.
 #
 module "typhoon-acc-1_20" {
-  count = local.enabled_1_20
+  count  = local.enabled_1_20
   source = "git::https://github.com/poseidon/typhoon//aws/flatcar-linux/kubernetes?ref=v1.20.2"

   cluster_name = var.cluster_name
@@ -22,14 +22,14 @@ module "typhoon-acc-1_20" {
   # node configuration
   ssh_authorized_key = tls_private_key.typhoon-acc.public_key_openssh

-  worker_count = var.worker_count
+  worker_count     = var.worker_count
   controller_count = var.controller_count
-  worker_type = var.controller_type
-  controller_type = var.worker_type
+  worker_type      = var.controller_type
+  controller_type  = var.worker_type
 }

 resource "local_file" "typhoon-acc-1_20" {
-  count = local.enabled_1_20
+  count    = local.enabled_1_20
   content  = module.typhoon-acc-1_20[0].kubeconfig-admin
   filename = local.kubeconfig_path
 }
@@ -10,8 +10,8 @@ variable "cluster_name" {
 }

 variable "kubernetes_version" {
-  type = string
-  default = "1.20.2"
+  type    = string
+  default = "1.20.2"
 }

 variable "controller_count" {
@@ -1,119 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// This tool categorizes a set of test names (camel-cased strings) into reduced set of prefixes
-// Input data is read from stdin as a newline separated list of test names (as generated by the 'go test -list' command)
-// Output is a JSON formated list of prefixes, to be conveniently used by GH actions 'fromJson()' function.
-// THe "depth" of output prefixes is the number of segments into the cammel-cased string from the common root.
-package main
-
-import (
-    "bufio"
-    "bytes"
-    "encoding/json"
-    "flag"
-    "fmt"
-    "io"
-    "os"
-    "sort"
-    "strings"
-    "unicode"
-)
-
-func main() {
-    depth := flag.Int("depth", 0, "The depth in the tree (in segments, from zero) to aggregate output prefixes by")
-    srt := flag.Bool("sort", false, "Sorts the output elements lexicographically")
-    flag.Parse()
-
-    b := bufio.NewReader(os.Stdin)
-    pt := PrefixTree{}
-
-    for {
-        dat, err := b.ReadString('\n')
-        if err == io.EOF {
-            break
-        }
-        if err != nil {
-            fmt.Println("failed to parse input: ", err)
-            os.Exit(100)
-        }
-        pt.addString(strings.TrimSuffix(dat, "\n"))
-    }
-
-    pfx := pt.prefixesToDepth(*depth)
-    if *srt {
-        sort.Strings(pfx)
-    }
-
-    // print out json
-    js := bytes.NewBuffer([]byte{})
-    enc := json.NewEncoder(js)
-    enc.Encode(pfx)
-    fmt.Print(js.String())
-}
-
-func tokenizeCamelCase(s string) (out []string) {
-    for len(s) > 0 {
-        nx := 0
-        for i := 1; i < len(s); i++ {
-            nx = strings.IndexFunc(s[i:], func(r rune) bool {
-                return unicode.IsUpper(r) || r == '_'
-            })
-            if nx == -1 {
-                out = append(out, strings.TrimPrefix(s, "_"))
-                return
-            }
-            if nx != 0 {
-                nx = nx + i
-                break
-            }
-            if i == len(s)-1 {
-                nx = len(s)
-            }
-        }
-        out = append(out, strings.TrimPrefix(s[:nx], "_"))
-        s = s[nx:]
-    }
-    return
-}
-
-type PrefixTree map[string]PrefixTree
-
-func (pt PrefixTree) addString(s string) {
-    if len(s) == 0 {
-        return
-    }
-    pt.addTokenized(tokenizeCamelCase(s))
-}
-
-func (pt PrefixTree) addTokenized(s []string) {
-    if len(s) < 1 {
-        return
-    }
-    var st PrefixTree
-    st, ok := pt[s[0]]
-    if !ok {
-        st = make(PrefixTree)
-    }
-    st.addTokenized(s[1:])
-    pt[s[0]] = st
-}
-
-func (pt PrefixTree) prefixesToDepth(d int) (px []string) {
-    if d == 0 {
-        for k := range pt {
-            px = append(px, k)
-        }
-        return
-    }
-    for k := range pt {
-        sfx := pt[k].prefixesToDepth(d - 1)
-        if len(sfx) == 0 {
-            px = append(px, k)
-        }
-        for _, s := range sfx {
-            px = append(px, k+s)
-        }
-    }
-    return
-}
@@ -1,167 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package main
-
-import (
-    "sort"
-    "testing"
-
-    "github.com/google/go-cmp/cmp"
-)
-
-func TestTokenizeCamelCase(t *testing.T) {
-    samples := map[string][]string{
-        "HelloWorld":  {"Hello", "World"},
-        "hello-world": {"hello-world"},
-        "TestAccKubernetesIngress_TLS":      {"Test", "Acc", "Kubernetes", "Ingress", "TLS"},
-        "TestAccKubernetesCSIDriver_basic":  {"Test", "Acc", "Kubernetes", "CSIDriver", "basic"},
-        "TestAccKubernetesAPIService_basic": {"Test", "Acc", "Kubernetes", "APIService", "basic"},
-        "TestAccKubernetesCertificateSigningRequest_basic": {"Test", "Acc", "Kubernetes", "Certificate", "Signing", "Request", "basic"},
-        "TestAccKubernetesPod_with_node_affinity_with_required_during_scheduling_ignored_during_execution": {"Test", "Acc", "Kubernetes", "Pod", "with", "node", "affinity", "with", "required", "during", "scheduling", "ignored", "during", "execution"},
-    }
-    for s, r := range samples {
-        t.Run(s, func(t *testing.T) {
-            res := tokenizeCamelCase(s)
-            fail := len(res) != len(r)
-            for ri := range res {
-                if fail {
-                    break
-                }
-                fail = res[ri] != r[ri]
-            }
-            if fail {
-                t.Errorf("Sample '%s' failed.\n\tWanted:\t%s\n\tActual:\t%v\n", s, r, res)
-            }
-        })
-    }
-}
-
-func TestAddString(t *testing.T) {
-    samples := []struct {
-        In  []string
-        Out PrefixTree
-    }{
-        {
-            In: []string{
-                "HelloWorld",
-                "HelloWonderfulWorld",
-                "HelloWorldWonder",
-                "GoodbyeCruelWorld",
-            },
-            Out: PrefixTree{
-                "Hello": {
-                    "World": {
-                        "Wonder": {},
-                    },
-                    "Wonderful": {
-                        "World": {},
-                    },
-                },
-                "Goodbye": {
-                    "Cruel": {
-                        "World": {},
-                    },
-                },
-            },
-        },
-        {
-            In: []string{
-                "TestAccKubernetesCSIDriver_basic",
-                "TestAccKubernetesAPIService_basic",
-                "TestAccKubernetesCertificateSigningRequest_basic",
-                "TestAccKubernetesClusterRole_basic",
-            },
-            Out: PrefixTree{
-                "Test": {
-                    "Acc": {
-                        "Kubernetes": {
-                            "CSIDriver": {
-                                "basic": {},
-                            },
-                            "APIService": {
-                                "basic": {},
-                            },
-                            "Certificate": {
-                                "Signing": {
-                                    "Request": {
-                                        "basic": {},
-                                    },
-                                },
-                            },
-                            "Cluster": {
-                                "Role": {
-                                    "basic": {},
-                                },
-                            },
-                        },
-                    },
-                },
-            },
-        },
-    }
-    for k, s := range samples {
-        tt := PrefixTree{}
-        for _, w := range s.In {
-            tt.addString(w)
-        }
-        if !cmp.Equal(s.Out, tt) {
-            t.Errorf("Sample %d failed.\n\tWanted:\t%v\n\tActual:\t%v\n", k, s.Out, tt)
-        }
-
-    }
-}
-
-func TestPrefixesToDepth(t *testing.T) {
-    type depthSample struct {
-        D int
-        P []string
-    }
-    samples := []struct {
-        In  []string
-        Out []depthSample
-    }{
-        {
-            In: []string{
-                "HelloWorld",
-                "HelloWonderfulWorld",
-                "HelloWorldWonder",
-                "GoodbyeCruelWorld",
-            },
-            Out: []depthSample{
-                {
-                    D: 1,
-                    P: []string{"HelloWorld", "HelloWonderful", "GoodbyeCruel"},
-                },
-            },
-        },
-        {
-            In: []string{
-                "TestAccKubernetesCSIDriver_basic",
-                "TestAccKubernetesAPIService_basic",
-                "TestAccKubernetesCertificateSigningRequest_basic",
-                "TestAccKubernetesClusterRole_basic",
-            },
-            Out: []depthSample{
-                {
-                    D: 3,
-                    P: []string{"TestAccKubernetesCSIDriver", "TestAccKubernetesAPIService", "TestAccKubernetesCertificate", "TestAccKubernetesCluster"},
-                },
-            },
-        },
-    }
-    for _, s := range samples {
-        tt := PrefixTree{}
-        for _, w := range s.In {
-            tt.addString(w)
-        }
-        for _, ds := range s.Out {
-            out := tt.prefixesToDepth(ds.D)
-            sort.Strings(out)
-            sort.Strings(ds.P)
-            if !cmp.Equal(out, ds.P) {
-                t.Errorf("Sample for depth %d failed!\n\tWanted:%v\n\tActual:%v", ds.D, ds.P, out)
-            }
-        }
-    }
-}
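For context on the two deleted tool files above: tools/main.go reduces a newline-separated list of test names (as produced by `go test -list`) to a JSON array of shared name prefixes at a chosen depth, which the GitHub workflows consumed through `fromJson()`. The following standalone sketch condenses the PrefixTree logic shown above and feeds it pre-tokenized names taken from the main_test.go fixtures; the camel-case tokenization step is deliberately skipped here for brevity, so this is an illustration, not a drop-in replacement for the removed tool.

```go
// Standalone sketch: condensed PrefixTree aggregation from the deleted
// tools/main.go, applied to already-tokenized test names.
package main

import (
	"encoding/json"
	"fmt"
	"sort"
)

// PrefixTree mirrors the type in the deleted tools/main.go.
type PrefixTree map[string]PrefixTree

// addTokenized inserts one name that has already been split into segments.
func (pt PrefixTree) addTokenized(s []string) {
	if len(s) < 1 {
		return
	}
	st, ok := pt[s[0]]
	if !ok {
		st = PrefixTree{}
	}
	st.addTokenized(s[1:])
	pt[s[0]] = st
}

// prefixesToDepth joins segments back together down to the given depth,
// matching the behaviour asserted in the deleted main_test.go.
func (pt PrefixTree) prefixesToDepth(d int) (px []string) {
	if d == 0 {
		for k := range pt {
			px = append(px, k)
		}
		return
	}
	for k := range pt {
		sfx := pt[k].prefixesToDepth(d - 1)
		if len(sfx) == 0 {
			px = append(px, k)
		}
		for _, s := range sfx {
			px = append(px, k+s)
		}
	}
	return
}

func main() {
	pt := PrefixTree{}
	// Pre-tokenized sample names, modelled on the test fixtures above.
	for _, name := range [][]string{
		{"Test", "Acc", "Kubernetes", "CSIDriver", "basic"},
		{"Test", "Acc", "Kubernetes", "APIService", "basic"},
		{"Test", "Acc", "Kubernetes", "CertificateSigningRequest", "basic"},
		{"Test", "Acc", "Kubernetes", "ClusterRole", "basic"},
	} {
		pt.addTokenized(name)
	}
	out := pt.prefixesToDepth(3)
	sort.Strings(out)
	js, _ := json.Marshal(out)
	// Prints: ["TestAccKubernetesAPIService","TestAccKubernetesCSIDriver",
	//          "TestAccKubernetesCertificateSigningRequest","TestAccKubernetesClusterRole"]
	fmt.Println(string(js))
}
```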