Closed by Josh-Diamond 2 years ago
Test Plan:
- rancher v2.7-head
- engine_install_url on the node_template resource, installing docker 20.10
A workaround for this, in the meantime, would be to add the following to the rancher2_node_template resource:
engine_install_url = "https://releases.rancher.com/install-docker/20.10.sh"
@Josh-Diamond This is not ready to test
Dev-validated on rancher v2.7-head with:
engine_install_url = "https://releases.rancher.com/install-docker/20.10.9.sh"
- rancher installs docker 20.10.9 on the nodes

main.tf
terraform {
  required_providers {
    rancher2 = {
      source  = "terraform.example.com/local/rancher2"
      version = "1.0.0"
    }
  }
}

provider "rancher2" {
  api_url   = var.rancher_api_url
  token_key = var.rancher_admin_bearer_token
  insecure  = true
}

data "rancher2_cloud_credential" "rancher2_cloud_credential" {
  name = var.cloud_credential_name
}

resource "rancher2_cluster" "rancher2_cluster" {
  name = var.cluster_name
  rke_config {
    kubernetes_version = "v1.24.4-rancher1-1"
    network {
      plugin = var.network_plugin
    }
  }
}

resource "rancher2_node_template" "rancher2_node_template" {
  name = var.node_template_name
  amazonec2_config {
    access_key     = var.aws_access_key
    secret_key     = var.aws_secret_key
    region         = var.aws_region
    ami            = var.aws_ami
    security_group = [var.aws_security_group_name]
    subnet_id      = var.aws_subnet_id
    vpc_id         = var.aws_vpc_id
    zone           = var.aws_zone_letter
    root_size      = var.aws_root_size
    instance_type  = var.aws_instance_type
  }
}

resource "rancher2_node_pool" "pool1" {
  cluster_id       = rancher2_cluster.rancher2_cluster.id
  name             = "pool1"
  hostname_prefix  = "tf-pool1-"
  node_template_id = rancher2_node_template.rancher2_node_template.id
  quantity         = 1
  control_plane    = false
  etcd             = true
  worker           = false
}

resource "rancher2_node_pool" "pool2" {
  cluster_id       = rancher2_cluster.rancher2_cluster.id
  name             = "pool2"
  hostname_prefix  = "tf-pool2-"
  node_template_id = rancher2_node_template.rancher2_node_template.id
  quantity         = 1
  control_plane    = true
  etcd             = false
  worker           = false
}

resource "rancher2_node_pool" "pool3" {
  cluster_id       = rancher2_cluster.rancher2_cluster.id
  name             = "pool3"
  hostname_prefix  = "tf-pool3-"
  node_template_id = rancher2_node_template.rancher2_node_template.id
  quantity         = 1
  control_plane    = false
  etcd             = false
  worker           = true
}
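The main.tf above references input variables that are not declared in the snippet; a minimal variables.tf sketch covering them (declarations inferred from the references above, all assumed to be strings) could look like:

# variables.tf (sketch)
variable "rancher_api_url"            { type = string }
variable "rancher_admin_bearer_token" { type = string }
variable "cloud_credential_name"      { type = string }
variable "cluster_name"               { type = string }
variable "network_plugin"             { type = string }
variable "node_template_name"         { type = string }
variable "aws_access_key"             { type = string }
variable "aws_secret_key"             { type = string }
variable "aws_region"                 { type = string }
variable "aws_ami"                    { type = string }
variable "aws_security_group_name"    { type = string }
variable "aws_subnet_id"              { type = string }
variable "aws_vpc_id"                 { type = string }
variable "aws_zone_letter"            { type = string }
variable "aws_root_size"              { type = string }
variable "aws_instance_type"          { type = string }
# The two "testing specific" values in terraform.tfvars below (expected_provider,
# expected_state_active) are presumably consumed by the test harness rather than
# by this configuration.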
terraform.tfvars
# Rancher specific variables
rancher_api_url = "<rancher API url>"
rancher_admin_bearer_token = "<rancher bearer token>"
cloud_credential_name = "<cloud credential name>"
# AWS specific variables
aws_access_key = "<AWS access key>"
aws_secret_key = "<AWS secret key>"
aws_instance_type = "t3.medium"
aws_ami = ""
aws_region = "us-east-2"
aws_security_group_name = "rancher-nodes"
aws_subnet_id = "subnet-xxxxxx"
aws_vpc_id = "vpc-xxxxxxx"
aws_zone_letter = "a"
aws_root_size = "80"
# RKE1 specific variables
cluster_name = "ablender-rke1"
network_plugin = "canal"
node_template_name = "tf-rke1-template"
# Testing specific variables
expected_provider = "rke"
expected_state_active = "active"
Scenarios tested:
- RKE1 EC2 cluster, no AMI
- RKE1 EC2 cluster, AMI eli-kube-dock-server ami-0e3c799b78951f41b (AMI had docker version 20.10.12)
- RKE1 EC2 cluster, no AMI, engine_install_url = "https://releases.rancher.com/install-docker/20.10.9.sh"
When provisioning a downstream RKE1 cluster with the docker install handled by the Terraform rancher2 provider, docker 17.03.2 is installed.
When provisioning a downstream RKE1 cluster with the docker install handled by the Rancher UI, docker 20.10 is installed.
This issue was created to bump the docker version installed by the Terraform rancher2 provider to 20.10.
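When reproducing either behaviour, the Docker version that actually landed on a node can be confirmed directly on the node (assuming SSH access; the login user depends on the AMI):

docker version --format '{{.Server.Version}}'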