ChainSafe / forest-iac

Infrastructure as Code to support the Forest Filecoin project
Apache License 2.0
10 stars 5 forks source link

add snapshot service replica #416

Closed LesnyRumcajs closed 7 months ago

LesnyRumcajs commented 7 months ago

Summary of changes

Changes introduced in this pull request:

Reference issue to close (if applicable)

Closes

Other information and links

github-actions[bot] commented 7 months ago

Forest: snapshot-service-2 Infrastructure Plan: success

Show Plan ``` data.local_file.init: Reading... data.external.sources_tar: Reading... data.local_file.init: Read complete after 0s [id=dc94e3ebf33fe45cb5e8946405ca9f6d06f4f21a] data.digitalocean_project.forest_project: Reading... data.digitalocean_ssh_keys.keys: Reading... data.external.sources_tar: Read complete after 0s [id=-] data.local_file.sources: Reading... data.local_file.sources: Read complete after 0s [id=bb5129f3e24d23f393420ccd3ea738485eda581b] data.digitalocean_ssh_keys.keys: Read complete after 0s [id=ssh_keys/8656000852680004208] data.digitalocean_project.forest_project: Read complete after 1s [id=da5e6601-7fd9-4d02-951e-390f7feb3411] Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + create Terraform will perform the following actions: # digitalocean_droplet.forest will be created + resource "digitalocean_droplet" "forest" { + backups = false + created_at = (known after apply) + disk = (known after apply) + graceful_shutdown = false + id = (known after apply) + image = "docker-20-04" + ipv4_address = (known after apply) + ipv4_address_private = (known after apply) + ipv6 = false + ipv6_address = (known after apply) + locked = (known after apply) + memory = (known after apply) + monitoring = true + name = "prod-forest-snapshot-2" + price_hourly = (known after apply) + price_monthly = (known after apply) + private_networking = (known after apply) + region = "fra1" + resize_disk = true + size = "s-4vcpu-16gb-amd" + ssh_keys = [ + "00:a0:c0:54:5f:40:22:10:52:8a:04:48:f9:c8:db:00", + "04:77:74:e8:81:92:9d:1e:cb:d3:5d:0d:fa:83:56:f6", + "31:fd:e9:da:70:df:ef:33:af:a2:ea:a1:fd:69:a7:9d", + "37:1e:1a:fc:25:2d:5a:a7:1f:49:b2:6d:53:5c:0e:45", + "41:91:6d:f9:f7:27:44:30:7f:a4:6f:36:e8:97:ad:cb", + "5a:a8:6d:02:66:21:e9:f7:27:b2:1c:6e:89:0f:65:77", + "77:09:d9:32:61:65:81:08:d1:e2:50:9b:ec:28:02:62", + "88:95:97:77:a1:1f:bf:e8:3a:84:20:7d:a9:4c:74:6d", + 
"99:ea:ec:bf:9f:d1:b2:52:02:b2:78:a2:57:25:a0:e7", + "9c:18:88:44:c4:d6:74:84:07:9a:3c:9a:f6:17:f3:e4", + "b6:03:52:e0:49:14:03:90:19:37:69:c3:c7:d0:e7:69", + "bb:7a:cc:18:56:7a:cb:2b:07:d7:8b:30:86:b8:b5:41", + "c7:f9:b0:49:24:aa:30:36:4e:5f:d4:a3:ab:43:49:e8", + "d3:6d:af:8e:a4:b9:8f:b8:38:2b:56:06:5f:38:48:a7", + "e4:0e:85:24:75:5e:f3:b1:77:c4:7d:a2:3a:1e:00:b1", + "f7:de:2d:83:ce:e7:c3:13:2c:ca:3a:f0:4b:4e:46:da", + "fa:48:60:7b:b0:c4:86:70:e9:fa:e9:f8:fb:c7:2e:72", + "fa:62:10:64:1b:77:eb:78:a5:ba:e0:86:ff:76:7e:97", + "fe:42:94:20:d0:a9:24:67:5f:de:78:c1:bb:8b:6c:92", ] + status = (known after apply) + tags = [ + "iac", + "prod", ] + urn = (known after apply) + user_data = (sensitive value) + vcpus = (known after apply) + volume_ids = (known after apply) + vpc_uuid = (known after apply) } # digitalocean_firewall.forest-firewall will be created + resource "digitalocean_firewall" "forest-firewall" { + created_at = (known after apply) + droplet_ids = (known after apply) + id = (known after apply) + name = "prod-forest-snapshot-2" + pending_changes = (known after apply) + status = (known after apply) + inbound_rule { + port_range = "22" + protocol = "tcp" + source_addresses = [ + "0.0.0.0/0", + "::/0", ] + source_droplet_ids = [] + source_kubernetes_ids = [] + source_load_balancer_uids = [] + source_tags = [] } + inbound_rule { + port_range = "2345" + protocol = "tcp" + source_addresses = [ + "0.0.0.0/0", + "::/0", ] + source_droplet_ids = [] + source_kubernetes_ids = [] + source_load_balancer_uids = [] + source_tags = [] } + inbound_rule { + port_range = "53" + protocol = "udp" + source_addresses = [ + "0.0.0.0/0", + "::/0", ] + source_droplet_ids = [] + source_kubernetes_ids = [] + source_load_balancer_uids = [] + source_tags = [] } + inbound_rule { + port_range = "80" + protocol = "tcp" + source_addresses = [ + "0.0.0.0/0", + "::/0", ] + source_droplet_ids = [] + source_kubernetes_ids = [] + source_load_balancer_uids = [] + source_tags = [] } + outbound_rule { 
+ destination_addresses = [ + "0.0.0.0/0", + "::/0", ] + destination_droplet_ids = [] + destination_kubernetes_ids = [] + destination_load_balancer_uids = [] + destination_tags = [] + port_range = "53" + protocol = "udp" } + outbound_rule { + destination_addresses = [ + "0.0.0.0/0", + "::/0", ] + destination_droplet_ids = [] + destination_kubernetes_ids = [] + destination_load_balancer_uids = [] + destination_tags = [] + port_range = "all" + protocol = "tcp" } } # digitalocean_project_resources.connect_forest_project will be created + resource "digitalocean_project_resources" "connect_forest_project" { + id = (known after apply) + project = "da5e6601-7fd9-4d02-951e-390f7feb3411" + resources = (known after apply) } # module.monitoring[0].newrelic_alert_policy.alert will be created + resource "newrelic_alert_policy" "alert" { + account_id = (known after apply) + id = (known after apply) + incident_preference = "PER_POLICY" + name = "prod-forest-snapshot-2 alert policy" } # module.monitoring[0].newrelic_notification_channel.slack-channel[0] will be created + resource "newrelic_notification_channel" "slack-channel" { + account_id = (known after apply) + active = true + destination_id = "f902e020-5993-4425-9ae3-133084fc870d" + id = (known after apply) + name = "prod-forest-snapshot-2 slack" + product = "IINT" + status = (known after apply) + type = "SLACK" + property { + key = "channelId" + value = "C05BHMZ7GTT" } + property { + key = "customDetailsSlack" + value = "issue id - {{issueId}}" } } # module.monitoring[0].newrelic_nrql_alert_condition.disk_space will be created + resource "newrelic_nrql_alert_condition" "disk_space" { + account_id = (known after apply) + aggregation_window = (known after apply) + description = "Alert when disk space usage is high on an the service host" + enabled = true + entity_guid = (known after apply) + id = (known after apply) + name = "High Disk Utilization" + policy_id = (known after apply) + type = "static" + violation_time_limit = 
(known after apply) + violation_time_limit_seconds = 259200 + critical { + operator = "above" + threshold = 95 + threshold_duration = 300 + threshold_occurrences = "all" } + nrql { + query = "SELECT latest(diskUsedPercent) FROM StorageSample where entityName = 'prod-forest-snapshot-2'" } + warning { + operator = "above" + threshold = 85 + threshold_duration = 300 + threshold_occurrences = "all" } } # module.monitoring[0].newrelic_workflow.alerting-workflow-slack[0] will be created + resource "newrelic_workflow" "alerting-workflow-slack" { + account_id = (known after apply) + destinations_enabled = true + enabled = true + enrichments_enabled = true + guid = (known after apply) + id = (known after apply) + last_run = (known after apply) + muting_rules_handling = "NOTIFY_ALL_ISSUES" + name = "prod-forest-snapshot-2 slack alerting workflow" + workflow_id = (known after apply) + destination { + channel_id = (known after apply) + name = (known after apply) + notification_triggers = (known after apply) + type = (known after apply) } + issues_filter { + filter_id = (known after apply) + name = "prod-forest-snapshot-2 alerting workflow filter" + type = "FILTER" + predicate { + attribute = "labels.policyIds" + operator = "EXACTLY_MATCHES" + values = (known after apply) } } } Plan: 7 to add, 0 to change, 0 to destroy. Changes to Outputs: + ip = [ + (known after apply), ] ───────────────────────────────────────────────────────────────────────────── Saved the plan to: /home/runner/work/forest-iac/forest-iac/tfplan To perform exactly these actions, run the following command to apply: terraform apply "/home/runner/work/forest-iac/forest-iac/tfplan" ```