Provider version: rancher/rancher2 v1.21.0
Terraform Version: 1.0.0
I have the Terraform plan below to provision an RKE cluster in Rancher on top of OpenStack.
However, I noticed that I get pretty high latency on my etcd nodes when I create the cluster with Terraform instead of through the GUI. I get fsync values around 300-400 ms, where it should be below 10 ms.
In the cluster I use as a reference I get numbers well below 10 ms, usually around 1.5 to 2 ms, which is perfect. That cluster is provisioned on the same hardware, and yes, I do have SSDs for the etcd nodes 😄.
Can anyone lend me an eye or two and see where I go wrong in my Terraform config?
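For reference, the disk-related knobs in a rancher2_node_template's openstack_config block look roughly like the sketch below. This is not my actual template: the values are placeholders and the field names are taken from the provider documentation as far as I know them, but these flavor/volume settings are what decide which storage the etcd machines actually end up on.

# Sketch only – placeholder values, hypothetical names, not my real template.
resource "rancher2_node_template" "openstack_etcd_example" {
  name        = "etcd-disk-example"                    # hypothetical name
  description = "illustration of disk-related settings"

  openstack_config {
    auth_url          = "https://keystone.example:5000/v3"  # placeholder
    region            = "RegionOne"                         # placeholder
    availability_zone = "nova"                              # placeholder
    username          = "rancher"                           # placeholder
    tenant_name       = "rancher"                           # placeholder
    flavor_name       = "etcd-ssd-flavor"                   # placeholder flavor
    image_name        = "ubuntu-20.04"                      # placeholder image

    # If the machines boot from a Cinder volume instead of local/ephemeral
    # disk, the volume type is what largely determines fsync performance:
    boot_from_volume = true
    volume_size      = "50"
    volume_type      = "ssd"                                # placeholder volume type
  }
}

The (truncated) plan output follows: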
helm_release.cilium will be created
resource "helm_release" "cilium" {
local_file.kube_config_openstack will be created
resource "local_file" "kube_config_openstack" {
rancher2_cluster.cluster_openstack will be created
resource "rancher2_cluster" "cluster_openstack" {
annotations = (known after apply)
ca_cert = (sensitive value)
cluster_registration_token = (known after apply)
default_pod_security_policy_template_id = (known after apply)
default_project_id = (known after apply)
description = "Terraform"
desired_agent_image = (known after apply)
desired_auth_image = (known after apply)
docker_root_dir = (known after apply)
driver = (known after apply)
enable_cluster_alerting = (known after apply)
enable_cluster_istio = (known after apply)
enable_cluster_monitoring = (known after apply)
enable_network_policy = (known after apply)
fleet_workspace_name = (known after apply)
id = (known after apply)
istio_enabled = (known after apply)
kube_config = (sensitive value)
labels = (known after apply)
name = (known after apply)
system_project_id = (known after apply)
windows_prefered_cluster = false
cluster_auth_endpoint {
cluster_template_answers {
cluster_template_questions {
eks_config_v2 {
cloud_credential_id = (known after apply)
imported = (known after apply)
kms_key = (known after apply)
kubernetes_version = (known after apply)
logging_types = (known after apply)
name = (known after apply)
private_access = (known after apply)
public_access = (known after apply)
public_access_sources = (known after apply)
region = (known after apply)
secrets_encryption = (known after apply)
security_groups = (known after apply)
service_role = (known after apply)
subnets = (known after apply)
tags = (known after apply)
node_groups {
desired_size = (known after apply)
disk_size = (known after apply)
ec2_ssh_key = (known after apply)
gpu = (known after apply)
image_id = (known after apply)
instance_type = (known after apply)
labels = (known after apply)
max_size = (known after apply)
min_size = (known after apply)
name = (known after apply)
request_spot_instances = (known after apply)
resource_tags = (known after apply)
spot_instance_types = (known after apply)
subnets = (known after apply)
tags = (known after apply)
user_data = (known after apply)
version = (known after apply)
launch_template {
addon_job_timeout = (known after apply)
ignore_docker_version = false
kubernetes_version = "v1.21.6-rancher1-1"
prefix_path = (known after apply)
ssh_agent_auth = false
ssh_cert_path = (known after apply)
ssh_key_path = (known after apply)
win_prefix_path = (known after apply)
authentication {
authorization {
bastion_host {
cloud_provider {
custom_cloud_provider = (known after apply)
name = "openstack"
openstack_cloud_provider {
block_storage {
global {
load_balancer {
metadata {
route {
dns {
node_selector = (known after apply)
options = (known after apply)
provider = (known after apply)
reverse_cidrs = (known after apply)
upstream_nameservers = (known after apply)
linear_autoscaler_params {
nodelocal {
tolerations {
update_strategy {
strategy = (known after apply)
rolling_update {
ingress {
default_backend = (known after apply)
dns_policy = (known after apply)
extra_args = (known after apply)
http_port = (known after apply)
https_port = (known after apply)
network_mode = (known after apply)
node_selector = (known after apply)
options = (known after apply)
provider = (known after apply)
tolerations {
update_strategy {
strategy = (known after apply)
rolling_update {
monitoring {
node_selector = (known after apply)
options = (known after apply)
provider = (known after apply)
replicas = (known after apply)
tolerations {
update_strategy {
strategy = (known after apply)
rolling_update {
network {
services {
etcd {
ca_cert = (known after apply)
cert = (sensitive value)
creation = (known after apply)
extra_args = (known after apply)
gid = 0
image = (known after apply)
key = (sensitive value)
path = (known after apply)
retention = (known after apply)
snapshot = (known after apply)
uid = 0
backup_config {
kube_api {
admission_configuration = (known after apply)
always_pull_images = (known after apply)
extra_args = (known after apply)
extra_binds = (known after apply)
extra_env = (known after apply)
image = (known after apply)
pod_security_policy = (known after apply)
service_cluster_ip_range = (known after apply)
service_node_port_range = (known after apply)
audit_log {
enabled = (known after apply)
configuration {
event_rate_limit {
secrets_encryption_config {
kube_controller {
kubelet {
kubeproxy {
scheduler {
upgrade_strategy {
drain = (known after apply)
max_unavailable_controlplane = (known after apply)
max_unavailable_worker = (known after apply)
drain_input {
scheduled_cluster_scan {
enabled = (known after apply)
scan_config {
schedule_config {
rancher2_cluster_sync.cluster_openstack will be created
resource "rancher2_cluster_sync" "cluster_openstack" {
rancher2_node_pool.ctrl_pool will be created
resource "rancher2_node_pool" "ctrl_pool" {
rancher2_node_pool.etcd_pool will be created
resource "rancher2_node_pool" "etcd_pool" {
rancher2_node_pool.worker_pool will be created
resource "rancher2_node_pool" "worker_pool" {
rancher2_node_template.openstack-etcdtemplate will be created
resource "rancher2_node_template" "openstack-etcdtemplate" {
annotations = (known after apply)
description = "terraform testtemplate"
driver = (known after apply)
driver_id = (known after apply)
engine_install_url = "https://releases.rancher.com/install-docker/20.10.sh"
id = (known after apply)
labels = (known after apply)
name = (known after apply)
use_internal_ip_address = true
openstack_config {
user_data_file = <<-EOT
cloud-config
EOT
rancher2_node_template.openstack-testtemplate will be created
resource "rancher2_node_template" "openstack-testtemplate" {
annotations = (known after apply)
description = "terraform testtemplate"
driver = (known after apply)
driver_id = (known after apply)
engine_install_url = "https://releases.rancher.com/install-docker/20.10.sh"
id = (known after apply)
labels = (known after apply)
name = (known after apply)
use_internal_ip_address = true
openstack_config {
random_id.instance_id will be created
Plan: 10 to add, 0 to change, 0 to destroy.