Closed t0yv0 closed 5 months ago
Another PANIC rendered into docs.
# Example FinSpace KX HDB cluster: single-AZ, 2 nodes, 1200 GB cache,
# one cached database, and init code pulled from S3.
resource "aws_finspace_kx_cluster" "example" {
  name                 = "my-tf-kx-cluster"
  environment_id       = aws_finspace_kx_environment.example.id
  type                 = "HDB"
  release_label        = "1.0"
  az_mode              = "SINGLE"
  availability_zone_id = "use1-az2"

  capacity_configuration {
    node_type  = "kx.s.2xlarge"
    node_count = 2
  }

  vpc_configuration {
    vpc_id             = aws_vpc.test.id
    security_group_ids = [aws_security_group.example.id]
    subnet_ids         = [aws_subnet.example.id]
    ip_address_type    = "IP_V4"
  }

  cache_storage_configurations {
    type = "CACHE_1000"
    size = 1200
  }

  database {
    database_name = aws_finspace_kx_database.example.name
    cache_configuration {
      cache_type = "CACHE_1000"
      db_paths   = "/"
    }
  }

  code {
    s3_bucket = aws_s3_bucket.test.id
    s3_key    = aws_s3_object.object.key
  }

  # Depending on the amount of data cached, create/update timeouts
  # may need to be increased up to a potential maximum of 18 hours.
  timeouts {
    create = "18h"
    update = "18h"
  }
}
to
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.finspace.KxCluster;
import com.pulumi.aws.finspace.KxClusterArgs;
import com.pulumi.aws.finspace.inputs.KxClusterCapacityConfigurationArgs;
import com.pulumi.aws.finspace.inputs.KxClusterVpcConfigurationArgs;
import com.pulumi.aws.finspace.inputs.KxClusterCacheStorageConfigurationArgs;
import com.pulumi.aws.finspace.inputs.KxClusterDatabaseArgs;
import com.pulumi.aws.finspace.inputs.KxClusterDatabaseCacheConfigurationArgs;
import com.pulumi.aws.finspace.inputs.KxClusterCodeArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

/**
 * Java translation of the Terraform aws_finspace_kx_cluster example:
 * a single-AZ HDB KX cluster with cache storage, one cached database,
 * and initialization code loaded from S3.
 *
 * NOTE(review): references such as exampleAwsFinspaceKxEnvironment,
 * test, exampleAwsSecurityGroup, exampleAwsSubnet, testAwsS3Bucket and
 * object stand for resources declared elsewhere in the generated
 * program — they are assumed to be in scope, per codegen convention.
 */
public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var example = new KxCluster("example", KxClusterArgs.builder()
            .name("my-tf-kx-cluster")
            .environmentId(exampleAwsFinspaceKxEnvironment.id())
            .type("HDB")
            .releaseLabel("1.0")
            .azMode("SINGLE")
            .availabilityZoneId("use1-az2")
            .capacityConfiguration(KxClusterCapacityConfigurationArgs.builder()
                .nodeType("kx.s.2xlarge")
                .nodeCount(2)
                .build())
            .vpcConfiguration(KxClusterVpcConfigurationArgs.builder()
                .vpcId(test.id())
                .securityGroupIds(exampleAwsSecurityGroup.id())
                .subnetIds(exampleAwsSubnet.id())
                .ipAddressType("IP_V4")
                .build())
            .cacheStorageConfigurations(KxClusterCacheStorageConfigurationArgs.builder()
                .type("CACHE_1000")
                .size(1200)
                .build())
            .databases(KxClusterDatabaseArgs.builder()
                .databaseName(exampleAwsFinspaceKxDatabase.name())
                // FIX: the generator previously emitted
                // %!v(PANIC=Format method: ... nil pointer dereference) here.
                // Reconstructed from the HCL source:
                //   cache_configuration { cache_type = "CACHE_1000" db_paths = "/" }
                .cacheConfiguration(KxClusterDatabaseCacheConfigurationArgs.builder()
                    .cacheType("CACHE_1000")
                    .dbPaths("/")
                    .build())
                .build())
            .code(KxClusterCodeArgs.builder()
                .s3Bucket(testAwsS3Bucket.id())
                .s3Key(object.key())
                .build())
            .build());
    }
}
pulumi about
N/A
Vote on this issue by adding a 👍 reaction. To contribute a fix for this issue, leave a comment (and link to your pull request, if you've opened one already).
What happened?
Another PANIC rendered into docs.
Example
to
Output of
pulumi about
N/A
Additional context
N/A
Contributing
Vote on this issue by adding a 👍 reaction. To contribute a fix for this issue, leave a comment (and link to your pull request, if you've opened one already).