jonbutland opened this issue 6 months ago
Looking at the values for gvid_table and nodename, it appears that "module."-prefixed node names are not present in gvid_table.
@jonbutland facing the same issue over here. Did you end up figuring it out by any chance?
@jonbutland can you share some sample code so I can reproduce this?
@yannik-projectb I got it to draw something by changing node_id = gvid_table.index(nodename) to
if "module" in nodename:
nodename_components = nodename.split(".")\n
nodename_shortened = nodename_components[-2:]
node_id = '.'.join(nodename_shortened)
else:
node_id = gvid_table.index(nodename)
in modules/tfwrapper.py, but I'm not sure the diagram is working as intended, as my diagrams seem to lack visualized subnets and autoscaling groups. That may also be because I am not using annotations. When I use this with various serverless projects (mostly Lambdas and API Gateways, or CloudFront and S3), the diagrams seem about right.
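For anyone who wants to try the same idea without touching the else branch, here it is as a standalone helper that only falls back to the module-stripped form when the fully qualified lookup fails. This is just a sketch (the function name lookup_node_id is made up, not terravision's actual code):

def lookup_node_id(gvid_table, nodename):
    """Return the gvid index for nodename, trying a module-stripped
    fallback when the fully qualified name is not in the table."""
    if nodename in gvid_table:
        return gvid_table.index(nodename)
    # e.g. "module.vpc.aws_acm_certificate.cert" -> "aws_acm_certificate.cert"
    shortened = ".".join(nodename.split(".")[-2:])
    if shortened in gvid_table:
        return gvid_table.index(shortened)
    raise ValueError(f"{nodename!r} not found in gvid_table")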
@patrickchugh, thanks for the response. This is the code for the cert, though I'm not sure how helpful it is; I think it just happens to be the first item in the list.
resource "aws_acm_certificate" "cert" {
domain_name = var.domainName
validation_method = "DNS"
lifecycle {
create_before_destroy = true
}
}
The failure happens after the plan, during the "Converting TF Graph Connections.. (this may take a while)" stage. My code is structured with a folder for each environment containing main.tf, terraform.tfvars, and vars.tf files, plus a modules folder with one or more submodules depending on the project. I did kinda get it to work with the code I posted above, but I am unsure whether the output is the expected output. Pardon my extremely sloppy Python.
@patrickchugh I'm having the same issue. Any idea why it might be happening?
Unhandled error: <class 'ValueError'>, 'module.compute.module.cluster.aws_autoscaling_group.cluster' is not in list, <traceback object at 0x7fc590607d80>
@jonbutland thanks a lot for your response here! Your fix actually got me a tiny bit further; at least terravision is now doing its thing. However, I'm now stuck on another one:
File "/Users/xxx/terravision/modules/interpreter.py", line 299, in find_replace_values
value = replace_module_vars(modulevar_found_list, value, module, tfdata)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/xxx/terravision/modules/interpreter.py", line 194, in replace_module_vars
value = find_replace_values(value, mod, tfdata)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/xxx/terravision/modules/interpreter.py", line 299, in find_replace_values
value = replace_module_vars(modulevar_found_list, value, module, tfdata)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/xxx/terravision/modules/interpreter.py", line 194, in replace_module_vars
value = find_replace_values(value, mod, tfdata)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/xxx/terravision/modules/interpreter.py", line 299, in find_replace_values
value = replace_module_vars(modulevar_found_list, value, module, tfdata)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/xxx/terravision/modules/interpreter.py", line 194, in replace_module_vars
value = find_replace_values(value, mod, tfdata)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/xxx/terravision/modules/interpreter.py", line 292, in find_replace_values
var_found_list = re.findall("var\.[A-Za-z0-9_\-]+", value)
/Users/xxx/terravision/modules/interpreter.py:1: SyntaxWarning: invalid escape sequence '\.'
from hmac import new
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/Cellar/python@3.12/3.12.3/Frameworks/Python.framework/Versions/3.12/lib/python3.12/re/__init__.py", line 217, in findall
return _compile(pattern, flags).findall(string)
^^^^^^^^^^^^^^^^^^^^^^^^
RecursionError: maximum recursion depth exceeded
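Reading the frames, find_replace_values and replace_module_vars call each other on the substituted value, so if a substitution never reaches a fixed point (a var. reference that resolves to a string still containing a var. reference), the pair loops until Python's recursion limit. Here's a minimal sketch of the idea rewritten iteratively with a cycle guard; this is my own illustration (substitute_vars and VAR_PATTERN are made-up names), not terravision's actual interpreter code:

import re

# A raw string avoids the SyntaxWarning about the invalid '\.' escape.
VAR_PATTERN = re.compile(r"var\.[A-Za-z0-9_\-]+")

def substitute_vars(value, variables):
    """Substitute var.* references iteratively, raising if the same
    intermediate value ever comes back (a substitution cycle)."""
    seen = set()
    while VAR_PATTERN.search(value):
        if value in seen:
            raise RuntimeError(f"substitution cycle detected in {value!r}")
        seen.add(value)
        for ref in VAR_PATTERN.findall(value):
            # Fall back to the reference itself if the variable is unknown;
            # the seen-set then trips on the next pass instead of recursing forever.
            value = value.replace(ref, str(variables.get(ref[len("var."):], ref)))
    return value

# substitute_vars("postgres://var.user@var.host", {"user": "admin", "host": "db"})
# -> "postgres://admin@db"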
@patrickchugh, any chance you might be able to help here?
Is there any chance I can try it with your source TF files?
@patrickchugh thanks for your reply! This is our staging main.tf file:
module "terraform_state_backend" {
source = "cloudposse/tfstate-backend/aws"
version = "1.4.1"
namespace = var.namespace
stage = var.stage
profile = var.profile
name = var.name
attributes = ["state"]
terraform_backend_config_file_path = "."
terraform_backend_config_file_name = "backend.tf"
force_destroy = false
}
module "ssh_key_pair" {
source = "cloudposse/key-pair/aws"
version = "0.20.0"
namespace = var.namespace
stage = var.stage
name = var.name
generate_ssh_key = false
ssh_public_key_path = "../keys"
}
module "vpc" {
source = "cloudposse/vpc/aws"
version = "2.2.0"
namespace = var.namespace
stage = var.stage
name = var.name
attributes = var.attributes
tags = var.tags
delimiter = var.delimiter
ipv4_primary_cidr_block = "172.31.0.0/16"
}
module "subnets" {
source = "cloudposse/dynamic-subnets/aws"
version = "2.4.2"
namespace = var.namespace
stage = var.stage
name = var.name
attributes = ["0"]
tags = var.tags
delimiter = var.delimiter
availability_zones = var.availability_zones
vpc_id = module.vpc.vpc_id
igw_id = [module.vpc.igw_id]
ipv4_cidr_block = [module.vpc.vpc_cidr_block]
nat_gateway_enabled = true
nat_instance_enabled = false
}
module "kms_key" {
source = "cloudposse/kms-key/aws"
version = "0.12.2"
namespace = var.namespace
stage = var.stage
name = var.name
description = "KMS key for API"
deletion_window_in_days = 30
enable_key_rotation = false
alias = join("/", ["alias", join("-", [var.namespace, var.name, var.stage])])
}
resource "aws_secretsmanager_secret" "env_secret" {
name = join("-", [var.namespace, var.name, var.stage, "secrets"])
kms_key_id = module.kms_key.key_id
recovery_window_in_days = 30
}
resource "aws_secretsmanager_secret_version" "env_secret_version" {
secret_id = aws_secretsmanager_secret.env_secret.id
secret_string = jsonencode(merge(var.eb_env_vars, {
REDIS_URL = join("", ["rediss://", module.redis.endpoint, ":", module.redis.port])
DATABASE_URL = join("", ["postgres://", var.rds_database_user, ":", var.rds_database_password, "@", module.rds_instance.instance_endpoint, "/", var.rds_database_name])
AWS_S3_FILES_BUCKET_NAME = module.s3_bucket_share.bucket_id
AWS_S3_PUBLIC_BUCKET_NAME = module.cdn_assets.s3_bucket
AWS_S3_PUBLIC_BUCKET_BASE_URL = join("", ["https://", module.cdn_assets.aliases[0]])
}))
depends_on = [module.rds_instance, module.redis, module.s3_bucket_share, module.cdn_assets]
}
module "rds_instance" {
source = "cloudposse/rds/aws"
version = "1.1.0"
namespace = var.namespace
stage = var.stage
name = var.name
database_name = var.rds_database_name
database_user = var.rds_database_user
database_password = var.rds_database_password
database_port = var.rds_database_port
multi_az = var.rds_multi_az
storage_type = var.rds_storage_type
allocated_storage = var.rds_allocated_storage
storage_encrypted = var.rds_storage_encrypted
engine = var.rds_engine
engine_version = var.rds_engine_version
instance_class = var.rds_instance_class
db_parameter_group = var.rds_db_parameter_group
publicly_accessible = var.rds_publicly_accessible
vpc_id = module.vpc.vpc_id
subnet_ids = module.subnets.private_subnet_ids
security_group_ids = [module.vpc.vpc_default_security_group_id]
apply_immediately = var.rds_apply_immediately
}
module "redis" {
source = "cloudposse/elasticache-redis/aws"
version = "1.2.2"
availability_zones = var.availability_zones
namespace = var.namespace
stage = var.stage
name = var.name
attributes = ["1"]
zone_id = []
apply_immediately = true
vpc_id = module.vpc.vpc_id
allowed_security_groups = [module.vpc.vpc_default_security_group_id]
subnets = module.subnets.private_subnet_ids
cluster_size = var.redis_cluster_size
instance_type = var.redis_instance_type
multi_az_enabled = var.redis_multi_az_enabled
automatic_failover_enabled = var.redis_automatic_failover_enabled
engine_version = var.redis_engine_version
family = var.redis_family
at_rest_encryption_enabled = var.redis_at_rest_encryption_enabled
transit_encryption_enabled = var.redis_transit_encryption_enabled
parameter = [
{
name = "notify-keyspace-events"
value = "lK"
}
]
}
module "acm_request_certificate" {
source = "cloudposse/acm-request-certificate/aws"
version = "0.18.0"
domain_name = join(".", ["*", var.domain])
process_domain_validation_options = false
ttl = "300"
tags = var.tags
wait_for_certificate_issued = true
zone_name = var.domain
}
data "aws_iam_policy_document" "eb_extended_permissions" {
statement {
sid = "AllowS3OperationsOnElasticBeanstalkBuckets"
actions = [
"s3:ListAllMyBuckets",
"s3:GetBucketLocation"
]
resources = ["*"]
}
statement {
sid = "AllowSecretsManagerOperations"
actions = [
"secretsmanager:GetSecretValue",
]
resources = [
aws_secretsmanager_secret.env_secret.arn
]
}
statement {
sid = "AllowKMSOperations"
actions = [
"kms:Decrypt",
"kms:DescribeKey"
]
resources = [
module.kms_key.key_arn
]
}
}
module "elastic_beanstalk_application" {
source = "cloudposse/elastic-beanstalk-application/aws"
version = "0.12.0"
namespace = var.namespace
stage = var.stage
name = var.name
attributes = var.attributes
tags = var.tags
delimiter = var.delimiter
description = var.description
}
module "elastic_beanstalk_environment_docker" {
source = "cloudposse/elastic-beanstalk-environment/aws"
version = "0.51.2"
namespace = var.namespace
stage = var.stage
name = join("-", [var.name, "docker"])
attributes = var.attributes
tags = var.tags
delimiter = var.delimiter
description = join("-", [var.description, "docker"])
region = var.region
keypair = module.ssh_key_pair.key_name
dns_zone_id = var.dns_zone_id
dns_subdomain = var.dns_subdomain
availability_zone_selector = var.eb_availability_zone_selector
wait_for_ready_timeout = var.eb_wait_for_ready_timeout
elastic_beanstalk_application_name = module.elastic_beanstalk_application.elastic_beanstalk_application_name
environment_type = var.eb_environment_type
loadbalancer_type = var.eb_loadbalancer_type
loadbalancer_ssl_policy = var.eb_loadbalancer_ssl_policy
loadbalancer_certificate_arn = module.acm_request_certificate.arn
elb_scheme = var.eb_elb_scheme
tier = var.eb_tier
version_label = var.eb_version_label
force_destroy = var.eb_force_destroy
instance_type = var.eb_instance_type
root_volume_size = var.eb_root_volume_size
root_volume_type = var.eb_root_volume_type
autoscale_min = var.eb_autoscale_min
autoscale_max = var.eb_autoscale_max
autoscale_measure_name = var.eb_autoscale_measure_name
autoscale_statistic = var.eb_autoscale_statistic
autoscale_unit = var.eb_autoscale_unit
autoscale_lower_bound = var.eb_autoscale_lower_bound
autoscale_lower_increment = var.eb_autoscale_lower_increment
autoscale_upper_bound = var.eb_autoscale_upper_bound
autoscale_upper_increment = var.eb_autoscale_upper_increment
vpc_id = module.vpc.vpc_id
loadbalancer_subnets = module.subnets.public_subnet_ids
application_subnets = module.subnets.public_subnet_ids
associated_security_group_ids = [module.vpc.vpc_default_security_group_id]
create_security_group = false
rolling_update_enabled = var.eb_rolling_update_enabled
rolling_update_type = var.eb_rolling_update_type
updating_min_in_service = var.eb_updating_min_in_service
updating_max_batch = var.eb_updating_max_batch
healthcheck_url = var.eb_healthcheck_url
application_port = var.eb_application_port
solution_stack_name = "64bit Amazon Linux 2023 v4.3.2 running Docker"
additional_settings = var.eb_additional_settings
env_vars = {
key = value
}
extended_ec2_policy_document = data.aws_iam_policy_document.eb_extended_permissions.json
prefer_legacy_ssm_policy = false
associate_public_ip_address = var.eb_associate_public_ip_address
depends_on = [module.rds_instance, module.redis]
}
module "s3_bucket_share" {
source = "cloudposse/s3-bucket/aws"
version = "4.2.0"
namespace = var.namespace
stage = var.stage
name = join("-", [var.name, "share"])
s3_object_ownership = "BucketOwnerEnforced"
enabled = true
user_enabled = true
versioning_enabled = false
}
module "s3_bucket_parser" {
source = "cloudposse/s3-bucket/aws"
version = "4.2.0"
namespace = var.namespace
stage = var.stage
name = join("-", [var.name, "parser"])
s3_object_ownership = "BucketOwnerEnforced"
enabled = true
user_enabled = true
versioning_enabled = false
}
locals {
s3_bucket_share_arn = module.s3_bucket_share.user_arn
}
module "cdn_assets" {
source = "cloudposse/cloudfront-s3-cdn/aws"
version = "0.93.0"
namespace = var.namespace
stage = var.stage
name = join("-", [var.name, "assets"])
aliases = [join(".", [join("-", ["assets", var.stage]), var.domain])]
dns_alias_enabled = true
parent_zone_id = var.dns_zone_id
acm_certificate_arn = var.us_east_1_acm_certificate_arn
versioning_enabled = false
cloudfront_access_logging_enabled = false
cloudfront_access_log_create_bucket = false
log_versioning_enabled = false
block_origin_public_access_enabled = true
deployment_principal_arns = {
"${local.s3_bucket_share_arn}" = [""]
}
}
module "ec2_bastion" {
source = "cloudposse/ec2-bastion-server/aws"
version = "0.30.1"
namespace = var.namespace
stage = var.stage
name = join("-", [var.name, "bastion"])
delimiter = var.delimiter
ami = var.bastion_ami_id # prevents automatic updates of AMI to latest (which always forces replacement)
enabled = var.bastion_enabled
security_groups = compact(concat([module.vpc.vpc_default_security_group_id]))
security_group_rules = var.bastion_security_group_rules
subnets = module.subnets.public_subnet_ids
key_name = module.ssh_key_pair.key_name
vpc_id = module.vpc.vpc_id
associate_public_ip_address = var.bastion_associate_public_ip_address
}
# Expose outputs
output "s3_share_aws_access_key_id" {
  sensitive = true
  value = module.s3_bucket_share.access_key_id
}

output "s3_share_aws_secret_access_key" {
  sensitive = true
  value = module.s3_bucket_share.secret_access_key
}

output "s3_share_bucket_name" {
  value = module.s3_bucket_share.bucket_id
}

output "cdn_bucket_name" {
  value = module.cdn_assets.s3_bucket
}

output "s3_parser_aws_access_key_id" {
  sensitive = true
  value = module.s3_bucket_parser.access_key_id
}

output "s3_parser_aws_secret_access_key" {
  sensitive = true
  value = module.s3_bucket_parser.secret_access_key
}

output "s3_parser_bucket_name" {
  value = module.s3_bucket_parser.bucket_id
}

output "cdn_aliases" {
  value = module.cdn_assets.aliases
}
And this is our backend.tf:
terraform {
  required_version = ">= 1.0.0"

  backend "s3" {
    region = "eu-west-3"
    bucket = "xxx"
    key = "terraform.tfstate"
    profile = "xxx"
    encrypt = "true"
    dynamodb_table = "xxx"
  }
}
Please lmk if you need any further info. :)
I can confirm that module.vpc.aws_acm_certificate.cert exists in my state file and in the plan output when I run terraform plan manually.
Traceback (most recent call last):
  File "/terravision/terravision", line 289, in <module>
    cli()
  File "/usr/local/lib/python3.9/site-packages/click/core.py", line 1130, in __call__
    return self.main(*args, **kwargs)
  File "/usr/local/lib/python3.9/site-packages/click/core.py", line 1055, in main
    rv = self.invoke(ctx)
  File "/usr/local/lib/python3.9/site-packages/click/core.py", line 1657, in invoke
    return _process_result(sub_ctx.command.invoke(sub_ctx))
  File "/usr/local/lib/python3.9/site-packages/click/core.py", line 1404, in invoke
    return ctx.invoke(self.callback, **ctx.params)
  File "/usr/local/lib/python3.9/site-packages/click/core.py", line 760, in invoke
    return __callback(*args, **kwargs)
  File "/terravision/terravision", line 207, in draw
    tfdata = compile_tfdata(source, varfile, workspace, debug, annotate)
  File "/terravision/terravision", line 50, in compile_tfdata
    tfdata = tfwrapper.tf_makegraph(tfdata)
  File "/terravision/modules/tfwrapper.py", line 219, in tf_makegraph
    node_id = gvid_table.index(nodename)
ValueError: 'module.vpc.aws_acm_certificate.cert' is not in list
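In case it helps with debugging: one way to see which node names Terraform itself puts in the graph, and compare them with the name that fails the gvid_table lookup, is to dump the DOT output of terraform graph. A quick diagnostic sketch (my own script, not part of terravision), run from the environment directory:

import subprocess

# Dump the DOT graph Terraform generates and print the node-label lines,
# so they can be compared against 'module.vpc.aws_acm_certificate.cert'.
dot = subprocess.run(
    ["terraform", "graph"],
    capture_output=True, text=True, check=True,
).stdout
for line in dot.splitlines():
    if "label" in line:
        print(line.strip())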