hashicorp / terraform-provider-azurerm

Terraform provider for Azure Resource Manager
https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs
Mozilla Public License 2.0

azurerm_monitor_diagnostic_setting continues to see drift after apply #17172

Open adhodgson1 opened 2 years ago

adhodgson1 commented 2 years ago

Terraform Version

1.1.7

AzureRM Provider Version

3.9.0

Affected Resource(s)/Data Source(s)

azurerm_monitor_diagnostic_setting

Terraform Configuration Files

data "azurerm_monitor_diagnostic_categories" "firewall" {
  resource_id = azurerm_firewall.firewall.id
}

resource "azurerm_monitor_diagnostic_setting" "firewall_logs" {
  name               = "${local.firewall_name}-Diagnostics-Logs"
  target_resource_id = azurerm_firewall.firewall.id
  storage_account_id = var.storage_account_id

  dynamic "log" {
    for_each = data.azurerm_monitor_diagnostic_categories.firewall.logs
    content {
      category = log.value
      enabled  = true
      retention_policy {
        days    = 30
        enabled = true
      }
    }
  }
}

resource "azurerm_monitor_diagnostic_setting" "firewall_metrics" {
  name                       = "${local.firewall_name}-Diagnostics-Metrics"
  target_resource_id         = azurerm_firewall.firewall.id
  log_analytics_workspace_id = var.la_workspace_id

  dynamic "metric" {
    for_each = data.azurerm_monitor_diagnostic_categories.firewall.metrics
    content {
      category = metric.value
      enabled  = true
      retention_policy {
        days    = 30
        enabled = true
      }
    }
  }
}

Debug Output/Panic Output

Terraform state after apply:
    {
      "module": "module.firewall",
      "mode": "managed",
      "type": "azurerm_monitor_diagnostic_setting",
      "name": "firewall_logs",
      "provider": "provider[\"registry.terraform.io/hashicorp/azurerm\"]",
      "instances": [
        {
          "schema_version": 0,
          "attributes": {
            "eventhub_authorization_rule_id": "",
            "eventhub_name": "",
            "id": "[redacted]",
            "log": [
              {
                "category": "AZFWApplicationRule",
                "enabled": true,
                "retention_policy": [
                  {
                    "days": 30,
                    "enabled": true
                  }
                ]
              },
              {
                "category": "AZFWApplicationRuleAggregation",
                "enabled": true,
                "retention_policy": [
                  {
                    "days": 30,
                    "enabled": true
                  }
                ]
              },
              {
                "category": "AZFWDnsQuery",
                "enabled": true,
                "retention_policy": [
                  {
                    "days": 30,
                    "enabled": true
                  }
                ]
              },
              {
                "category": "AZFWFqdnResolveFailure",
                "enabled": true,
                "retention_policy": [
                  {
                    "days": 30,
                    "enabled": true
                  }
                ]
              },
              {
                "category": "AZFWIdpsSignature",
                "enabled": true,
                "retention_policy": [
                  {
                    "days": 30,
                    "enabled": true
                  }
                ]
              },
              {
                "category": "AZFWNatRule",
                "enabled": true,
                "retention_policy": [
                  {
                    "days": 30,
                    "enabled": true
                  }
                ]
              },
              {
                "category": "AZFWNatRuleAggregation",
                "enabled": true,
                "retention_policy": [
                  {
                    "days": 30,
                    "enabled": true
                  }
                ]
              },
              {
                "category": "AZFWNetworkRule",
                "enabled": true,
                "retention_policy": [
                  {
                    "days": 30,
                    "enabled": true
                  }
                ]
              },
              {
                "category": "AZFWNetworkRuleAggregation",
                "enabled": true,
                "retention_policy": [
                  {
                    "days": 30,
                    "enabled": true
                  }
                ]
              },
              {
                "category": "AZFWThreatIntel",
                "enabled": true,
                "retention_policy": [
                  {
                    "days": 30,
                    "enabled": true
                  }
                ]
              },
              {
                "category": "AzureFirewallApplicationRule",
                "enabled": true,
                "retention_policy": [
                  {
                    "days": 30,
                    "enabled": true
                  }
                ]
              },
              {
                "category": "AzureFirewallDnsProxy",
                "enabled": true,
                "retention_policy": [
                  {
                    "days": 30,
                    "enabled": true
                  }
                ]
              },
              {
                "category": "AzureFirewallNetworkRule",
                "enabled": true,
                "retention_policy": [
                  {
                    "days": 30,
                    "enabled": true
                  }
                ]
              }
            ],
            "log_analytics_destination_type": "AzureDiagnostics",
            "log_analytics_workspace_id": "",
            "metric": [],
            "name": "firewall-01-Diagnostics-Logs",
            "storage_account_id": "[redacted]",
            "target_resource_id": "[redacted]",
            "timeouts": null
          },
          "sensitive_attributes": [],
          "private": "[redacted]",
          "dependencies": [
            "data.azurerm_storage_account.firewall_logs_storage_account",
            "module.firewall.azurerm_firewall.firewall",
            "module.firewall.data.azurerm_monitor_diagnostic_categories.firewall"
          ]
        }
      ]
    },
    {
      "module": "module.firewall",
      "mode": "managed",
      "type": "azurerm_monitor_diagnostic_setting",
      "name": "firewall_metrics",
      "provider": "provider[\"registry.terraform.io/hashicorp/azurerm\"]",
      "instances": [
        {
          "schema_version": 0,
          "attributes": {
            "eventhub_authorization_rule_id": "",
            "eventhub_name": "",
            "id": "[redacted]",
            "log": [],
            "log_analytics_destination_type": "AzureDiagnostics",
            "log_analytics_workspace_id": "[redacted]",
            "metric": [
              {
                "category": "AllMetrics",
                "enabled": true,
                "retention_policy": [
                  {
                    "days": 30,
                    "enabled": true
                  }
                ]
              }
            ],
            "name": "firewall-01-Diagnostics-Metrics",
            "storage_account_id": "",
            "target_resource_id": "[redacted]",
            "timeouts": null
          },
          "sensitive_attributes": [],
          "private": "[redacted]",
          "dependencies": [
            "data.azurerm_log_analytics_workspace.firewall_logs_law",
            "module.firewall.azurerm_firewall.firewall",
            "module.firewall.data.azurerm_monitor_diagnostic_categories.firewall"
          ]
        }
      ]
    }

Expected Behaviour

Apply succeeds and subsequent plans show no changes.

Actual Behaviour

Output of plan:

Note: Objects have changed outside of Terraform

Terraform detected the following changes made outside of Terraform since the last "terraform apply":

module.firewall.azurerm_monitor_diagnostic_setting.firewall_logs has changed

~ resource "azurerm_monitor_diagnostic_setting" "firewall_logs" {
    id   = "[redacted]"
    name = "firewall-01-Diagnostics-Logs"

(3 unchanged attributes hidden)

  + metric {
      + category = "AllMetrics"
      + enabled  = false

      + retention_policy {
          + days    = 0
          + enabled = false
        }
    }
    # (13 unchanged blocks hidden)
}

module.firewall.azurerm_monitor_diagnostic_setting.firewall_metrics has changed

~ resource "azurerm_monitor_diagnostic_setting" "firewall_metrics" {
    id   = "[redacted]"
    name = "firewall-01-Diagnostics-Metrics"

(3 unchanged attributes hidden)

  + log {
      + category = "AZFWApplicationRule"
      + enabled  = false

      + retention_policy {
          + days    = 0
          + enabled = false
        }
    }
  + log {
      + category = "AZFWApplicationRuleAggregation"
      + enabled  = false

      + retention_policy {
          + days    = 0
          + enabled = false
        }
    }
  + log {
      + category = "AZFWDnsQuery"
      + enabled  = false

      + retention_policy {
          + days    = 0
          + enabled = false
        }
    }
  + log {
      + category = "AZFWFqdnResolveFailure"
      + enabled  = false

      + retention_policy {
          + days    = 0
          + enabled = false
        }
    }
  + log {
      + category = "AZFWIdpsSignature"
      + enabled  = false

      + retention_policy {
          + days    = 0
          + enabled = false
        }
    }
  + log {
      + category = "AZFWNatRule"
      + enabled  = false

      + retention_policy {
          + days    = 0
          + enabled = false
        }
    }
  + log {
      + category = "AZFWNatRuleAggregation"
      + enabled  = false

      + retention_policy {
          + days    = 0
          + enabled = false
        }
    }
  + log {
      + category = "AZFWNetworkRule"
      + enabled  = false

      + retention_policy {
          + days    = 0
          + enabled = false
        }
    }
  + log {
      + category = "AZFWNetworkRuleAggregation"
      + enabled  = false

      + retention_policy {
          + days    = 0
          + enabled = false
        }
    }
  + log {
      + category = "AZFWThreatIntel"
      + enabled  = false

      + retention_policy {
          + days    = 0
          + enabled = false
        }
    }
  + log {
      + category = "AzureFirewallApplicationRule"
      + enabled  = false

      + retention_policy {
          + days    = 0
          + enabled = false
        }
    }
  + log {
      + category = "AzureFirewallDnsProxy"
      + enabled  = false

      + retention_policy {
          + days    = 0
          + enabled = false
        }
    }
  + log {
      + category = "AzureFirewallNetworkRule"
      + enabled  = false

      + retention_policy {
          + days    = 0
          + enabled = false
        }
    }

    # (1 unchanged block hidden)
}

Steps to Reproduce

  1. Run Terraform plan.
  2. Run Terraform plan.

It seems that when the plan refreshes the settings from the Azure API, the API returns every category back to Terraform, and Terraform sees the categories not declared in the configuration as extra objects that shouldn't exist.
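For anyone hitting this, one interim option (a minimal sketch only; it hides the drift rather than fixing the provider behaviour, and the names follow the configuration above) is to tell Terraform to ignore the block type this resource intentionally does not manage:

resource "azurerm_monitor_diagnostic_setting" "firewall_logs" {
  name               = "${local.firewall_name}-Diagnostics-Logs"
  target_resource_id = azurerm_firewall.firewall.id
  storage_account_id = var.storage_account_id

  # ... dynamic "log" blocks as in the configuration above ...

  lifecycle {
    # Assumption: this resource only manages log blocks, so differences in the
    # metric blocks reported back by the API are ignored during planning.
    ignore_changes = [metric]
  }
}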

Important Factoids

No response

References

No response

juicybaba commented 2 years ago

same here

jb68 commented 2 years ago

Similar here, but for azurerm_monitor_diagnostic_setting.eventhub. I was able to work around this bug by adding, in the tf file, a setting set to false for all the listed options. So, to get rid of this complaint from terraform plan:

      - log {
          - category = "RuntimeAuditLogs" -> null
          - enabled  = false -> null

          - retention_policy {
              - days    = 0 -> null
              - enabled = false -> null
            }
        }

I added this in the tf file:

  log {
    category = "RuntimeAuditLogs"
    enabled  = false

    retention_policy {
      days    = 0
      enabled = false
    }
  }
WaitingForGuacamole commented 2 years ago

For me, it's a little worse: it's detecting a change in target_resource_id where there is none, and forcing replacement:

-/+ resource "azurerm_monitor_diagnostic_setting" "this" {
      ~ id                         = "/subscriptions/<mySubscriptionId>/resourcegroups/<myResourceGroupName>/providers/microsoft.network/bastionhosts/myResource|diag-myResource" -> (known after apply)
        name                       = "diag-myResource"
      ~ target_resource_id         = "/subscriptions/<mySubscriptionId>/resourcegroups/<myResourceGroupName>/providers/microsoft.network/bastionhosts/myResource" -> "/subscriptions/<mySubscriptionId>/resourceGroups/<myResourceGroupName>/providers/Microsoft.Network/bastionHosts/myResource" # forces replacement
        # (2 unchanged attributes hidden)

      - timeouts {}
        # (2 unchanged blocks hidden)
    }

The id will change because the target_resource_id "change" forces a replacement. But there's clearly no change in target_resource_id, so why is the provider picking it up as one?

mgressman commented 2 years ago

Same situation, slightly different symptom.

-/+ resource "azurerm_monitor_diagnostic_setting" "diagnostic_setting" {
      ~ id                 = "/subscriptions//resourceGroups//providers/Microsoft.Sql/managedInstances/|All logs to Log Analytics" -> (known after apply)
        name               = "All logs to Log Analytics"
      ~ target_resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Sql/managedInstances/" -> (known after apply) # forces replacement

(1 unchanged attribute hidden)

    # (4 unchanged blocks hidden)
}

The diagnostic resource was created by Terraform, so Terraform must have set the target_resource_id. So why is it that right after running apply, the plan shows this? If Terraform set the target_resource_id during the initial apply, why does it now see it as a change?

markphahn commented 2 years ago

This appears to be a documentation error, as pointed out by my son. It appears that if a given resource supports several diagnostic categories, then they all must be set in the azurerm_monitor_diagnostic_setting resource definition.

For example, for a specific resource like my example data factory resource, you can list the diagnostic categories available:

az monitor diagnostic-settings categories list --resource /subscriptions/<subscription id>/resourceGroups/edw-dev-azurerm-monitor-diagnostic-setting-bug/providers/Microsoft.DataFactory/factories/edw-dev-factory 

or, to get just the category names:

az monitor diagnostic-settings categories list --resource /subscriptions/<subscription id>/resourceGroups/edw-dev-azurerm-monitor-diagnostic-setting-bug/providers/Microsoft.DataFactory/factories/edw-dev-factory | grep name
      "name": "ActivityRuns",
      "name": "PipelineRuns",
      "name": "TriggerRuns",
      "name": "SandboxPipelineRuns",
      "name": "SandboxActivityRuns",
      "name": "SSISPackageEventMessages",
      "name": "SSISPackageExecutableStatistics",
      "name": "SSISPackageEventMessageContext",
      "name": "SSISPackageExecutionComponentPhases",
      "name": "SSISPackageExecutionDataStatistics",
      "name": "SSISIntegrationRuntimeLogs",
      "name": "AllMetrics",

See: https://docs.microsoft.com/en-us/cli/azure/monitor/diagnostic-settings/categories?view=azure-cli-latest#az-monitor-diagnostic-settings-categories-list

Each of those items must be specified in your azurerm_monitor_diagnostic_setting resource. If you do not provide an explicit setting, then the Terraform provider will set reasonable defaults. But then the next time it reads your definition (during plan), the defaults will not match what is in your resource definition (because your definition looks like it has nulls instead of the default values).

E.g. I fixed my azurerm_monitor_diagnostic_setting resource setting for my data factory resource by changing it from this

resource "azurerm_monitor_diagnostic_setting" "edw_monitor_adf_ingestion" {
  name                          = "edw-dev-diagnostics-adf-igestion"
  target_resource_id            = azurerm_data_factory.edw_factory.id

  log_analytics_workspace_id    = azurerm_log_analytics_workspace.edw_log_analytics_workspace.id
  log_analytics_destination_type = "Dedicated" # or null see [documentation][1]

  log {
    category                    = "ActivityRuns"
    enabled                     = true
    retention_policy {
      days                      = 0
      enabled                   = false
    }
  }
}

to this, which specifies every possible diagnostic category from the list returned by the az command above:

resource "azurerm_monitor_diagnostic_setting" "edw_monitor_adf_ingestion" {
  name                          = "edw-dev-diagnostics-adf-igestion"
  target_resource_id            = azurerm_data_factory.edw_factory.id

  log_analytics_workspace_id    = azurerm_log_analytics_workspace.edw_log_analytics_workspace.id
  log_analytics_destination_type = "Dedicated" # or null see [documentation][1]

  log {
    category                    = "ActivityRuns"
    enabled                     = true
    retention_policy {
      days                      = 0
      enabled                   = false
    }
  }

  log {
    category                    = "PipelineRuns"
    enabled                     = false
    retention_policy {
      days                      = 0
      enabled                   = false
    }
  }

  log {
    category                    = "TriggerRuns"
    enabled                     = false
    retention_policy {
      days                      = 0
      enabled                   = false
    }
  }

  log {
    category                    = "SandboxActivityRuns"
    enabled                     = false
    retention_policy {
      days                      = 0
      enabled                   = false
    }
  }

  log {
    category                    = "SandboxPipelineRuns"
    enabled                     = false
    retention_policy {
      days                      = 0
      enabled                   = false
    }
  }

  log {
    category                    = "SSISPackageEventMessages"
    enabled                     = false
    retention_policy {
      days                      = 0
      enabled                   = false
    }
  }

  log {
    category                    = "SSISPackageExecutableStatistics"
    enabled                     = false
    retention_policy {
      days                      = 0
      enabled                   = false
    }
  }

  log {
    category                    = "SSISPackageEventMessageContext"
    enabled                     = false
    retention_policy {
      days                      = 0
      enabled                   = false
    }
  }

  log {
    category                    = "SSISPackageExecutionComponentPhases"
    enabled                     = false
    retention_policy {
      days                      = 0
      enabled                   = false
    }
  }

  log {
    category                    = "SSISPackageExecutionDataStatistics"
    enabled                     = false
    retention_policy {
      days                      = 0
      enabled                   = false
    }
  }

  log {
    category                    = "SSISIntegrationRuntimeLogs"
    enabled                     = false
    retention_policy {
      days                      = 0
      enabled                   = false
    }
  }

  metric {
    category                    = "AllMetrics"
    enabled                     = false
    retention_policy {
      days                      = 0
      enabled                   = false
    }
  }
}

By fully specifying a value for each category, I was able to have a definition in my azurerm_monitor_diagnostic_setting resource that matched the actual Azure definition set by the provider.

As a further example, using the firewall definition referenced at the beginning of this issue, you can find all the categories you need to list for a firewall resource with this command:

 az monitor diagnostic-settings categories list --resource /subscriptions/<subscription id>/resourceGroups/edw-dev-azurerm-monitor-diagnostic-setting-bug/providers/Microsoft.Network/azureFirewalls/testfirewall | grep name
      "name": "AzureFirewallApplicationRule",
      "name": "AzureFirewallNetworkRule",
      "name": "AzureFirewallDnsProxy",
      "name": "AZFWNetworkRule",
      "name": "AZFWApplicationRule",
      "name": "AZFWNatRule",
      "name": "AZFWThreatIntel",
      "name": "AZFWIdpsSignature",
      "name": "AZFWDnsQuery",
      "name": "AZFWFqdnResolveFailure",
      "name": "AZFWApplicationRuleAggregation",
      "name": "AZFWNetworkRuleAggregation",
      "name": "AZFWNatRuleAggregation",
      "name": "AllMetrics",

For more information and a demonstration see: https://github.com/markphahn/azurerm_monitor_diagnostic_setting_bug

adhodgson1 commented 2 years ago

Thanks for the responses. The last post by @markphahn is similar to what I had followed before. However, you can use the following to get the list of categories dynamically from Terraform:

data "azurerm_monitor_diagnostic_categories" "firewall" {
  resource_id = azurerm_firewall.firewall.id
}

If you set all categories to be logged this problem goes away, which I didn't make clear before:

resource "azurerm_monitor_diagnostic_setting" "firewall_diagnostics" {
  name               = "${local.firewall_name}-Diagnostics-Logs"
  target_resource_id = azurerm_firewall.firewall.id
  log_analytics_workspace_id = var.la_workspace_id

  dynamic "metric" {
    for_each = data.azurerm_monitor_diagnostic_categories.firewall.metrics
    content {
      category = metric.value
      enabled  = true
      retention_policy {
        days    = 30
        enabled = true
      }
    }
  }
  dynamic "log" {
    for_each = data.azurerm_monitor_diagnostic_categories.firewall.logs
    content {
      category = log.value
      enabled  = true
      retention_policy {
        days    = 30
        enabled = true
      }
    }
  }
}

I think this is sufficient for most users. However, in my case I have to send metrics to the Log Analytics instance and logs to the storage account, as per the example above, and that setup continues to see drift as described in my original post. For 99% of these cases I am using the above code and don't get drift. Maybe I should have called that out in my earlier comment, as I think some of the comments on this post could be solved by the solution here, which is similar to @markphahn's, only using Terraform to get the available categories instead of adding them manually.

juicybaba commented 2 years ago

I have a similar issue where terraform plan for azurerm_monitor_diagnostic_setting keeps showing the removal of log and metric config that doesn't even exist in the Terraform code or the Azure portal.

Having all log categories enabled will be painful for costs when new categories are introduced to replace existing ones, e.g. Azure Firewall.

woo-Erupt commented 2 years ago

I share this issue with you all. But I'm also having an issue with Linked Storage. When using azurerm_log_analytics_linked_storage_account, in my tf code data_source_type = "customlogs". But on rerunning a tf plan:

# azurerm_log_analytics_linked_storage_account.link_sa must be replaced
-/+ resource "azurerm_log_analytics_linked_storage_account" "link_sa" {
      ~ data_source_type = "CustomLogs" -> "customlogs" # forces replacement

It wants to "destroy and then create replacement". If I modify the data source type to "CustomLogs", I get the following error:

Error: expected data_source_type to be one of [customlogs azurewatson query alerts ingestion], got CustomLogs
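As a possible stop-gap (a sketch only, not a fix for the provider's validation/casing mismatch), ignoring changes to data_source_type may stop the case-only difference from forcing a replacement; the other arguments are elided here:

resource "azurerm_log_analytics_linked_storage_account" "link_sa" {
  # ... existing arguments, keeping the lowercase value the validation accepts ...
  data_source_type = "customlogs"

  lifecycle {
    # Assumption: the API reports "CustomLogs" back on refresh; ignoring the
    # attribute suppresses the case-only diff that forces replacement.
    ignore_changes = [data_source_type]
  }
}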

teowa commented 2 years ago

Hi @adhodgson1, thanks for submitting this! There seems to be a related issue, https://github.com/hashicorp/terraform-provider-azurerm/issues/7235, which gives a workaround similar to the one provided in the comment above. Maybe you can use the config below to remove the diff in the plan (both the log and metric blocks need to be specified, and you can disable some of them); you may also need to add the log_analytics_destination_type and log_analytics_workspace_id properties. Hope this helps.

TF config:

resource "azurerm_monitor_diagnostic_setting" "firewall_logs" {
  name                           = "${local.firewall_name}-Diagnostics-Logs"
  target_resource_id             = azurerm_firewall.firewall.id
  storage_account_id             = var.storage_account_id
  log_analytics_workspace_id     = var.la_workspace_id
  log_analytics_destination_type = "AzureDiagnostics"

  dynamic "metric" {
    for_each = data.azurerm_monitor_diagnostic_categories.firewall.metrics
    content {
      category = metric.value
      enabled  = false
      retention_policy {
        days    = 0
        enabled = false
      }
    }
  }

  dynamic "log" {
    for_each = data.azurerm_monitor_diagnostic_categories.firewall.logs
    content {
      category = log.value
      enabled  = true
      retention_policy {
        days    = 30
        enabled = true
      }
    }
  }
}

resource "azurerm_monitor_diagnostic_setting" "firewall_metrics" {
  name                           = "${local.firewall_name}-Diagnostics-Metrics"
  target_resource_id             = azurerm_firewall.firewall.id
  log_analytics_workspace_id     = var.la_workspace_id
  log_analytics_destination_type = "AzureDiagnostics"

  dynamic "metric" {
    for_each = data.azurerm_monitor_diagnostic_categories.firewall.metrics
    content {
      category = metric.value
      enabled  = true
      retention_policy {
        days    = 30
        enabled = true
      }
    }
  }

  dynamic "log" {
    for_each = data.azurerm_monitor_diagnostic_categories.firewall.logs
    content {
      category = log.value
      enabled  = false
      retention_policy {
        days    = 0
        enabled = false
      }
    }
  }
}

And if you need to enable only some of the log or metric categories, you can define a list in locals and use contains in the dynamic block, as in https://github.com/hashicorp/terraform-provider-azurerm/issues/10388#issuecomment-1067095863.
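For reference, a minimal sketch of that locals-plus-contains pattern, using the firewall example from this issue (the category names in the local list are placeholders):

locals {
  # Placeholder list of the only log categories to actually collect.
  enabled_log_categories = ["AZFWApplicationRule", "AZFWNetworkRule"]
}

resource "azurerm_monitor_diagnostic_setting" "firewall_logs" {
  name               = "${local.firewall_name}-Diagnostics-Logs"
  target_resource_id = azurerm_firewall.firewall.id
  storage_account_id = var.storage_account_id

  dynamic "log" {
    # Iterate over every category so the configuration matches what Azure returns.
    for_each = data.azurerm_monitor_diagnostic_categories.firewall.logs
    content {
      category = log.value
      enabled  = contains(local.enabled_log_categories, log.value)

      retention_policy {
        days    = contains(local.enabled_log_categories, log.value) ? 30 : 0
        enabled = contains(local.enabled_log_categories, log.value)
      }
    }
  }
}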

And for @woo-Erupt, the azurerm_log_analytics_linked_storage_account issue seems to be related to https://github.com/hashicorp/terraform-provider-azurerm/issues/16234.

arbitmcdonald commented 2 years ago

My versions:

Terraform v1.2.7
on windows_amd64
+ provider registry.terraform.io/hashicorp/azuread v2.27.0
+ provider registry.terraform.io/hashicorp/azurerm v2.99.0
+ provider registry.terraform.io/hashicorp/template v2.2.0

I'm having a similar issue, but with what I believe to be slightly different symptoms to the above. I was brought here by https://github.com/hashicorp/terraform-provider-azurerm/issues/10388.

Here's what I get when planning, a number of logs 'to remove' every single time I run plan:

# azurerm_monitor_aad_diagnostic_setting.aad_logging will be updated in-place
  ~ resource "azurerm_monitor_aad_diagnostic_setting" "aad_logging" {
        id                 = "/providers/Microsoft.AADIAM/diagnosticSettings/adlogs"
        name               = "adlogs"
        # (1 unchanged attribute hidden)

      - log {
          - category = "ADFSSignInLogs" -> null
          - enabled  = false -> null

          - retention_policy {
              - days    = 0 -> null
              - enabled = false -> null
            }
        }
      - log {
          - category = "B2CRequestLogs" -> null
          - enabled  = false -> null

          - retention_policy {
              - days    = 0 -> null
              - enabled = false -> null
            }
        }
      - log {
          - category = "NetworkAccessTrafficLogs" -> null
          - enabled  = false -> null

          - retention_policy {
              - days    = 0 -> null
              - enabled = false -> null
            }
        }
      - log {
          - category = "ProvisioningLogs" -> null
          - enabled  = false -> null

          - retention_policy {
              - days    = 0 -> null
              - enabled = false -> null
            }
        }
      - log {
          - category = "RiskyServicePrincipals" -> null
          - enabled  = false -> null

          - retention_policy {
              - days    = 0 -> null
              - enabled = false -> null
            }
        }
      - log {
          - category = "ServicePrincipalRiskEvents" -> null
          - enabled  = false -> null

          - retention_policy {
              - days    = 0 -> null
              - enabled = false -> null
            }
        }

        # (7 unchanged blocks hidden)
    }

Here's my config:

resource "azurerm_monitor_aad_diagnostic_setting" "aad_logging" {
    name               = "adlogs"
    storage_account_id = azurerm_storage_account.logging.id

    depends_on = [
        azurerm_storage_account.logging
    ]

    log {
        category = "SignInLogs"
        enabled  = true
        retention_policy {
            enabled = true
            days    = 1
        }
    }
    log {
        category = "AuditLogs"
        enabled  = true
        retention_policy {
            enabled = true
            days    = 1
        }
    }
    log {
        category = "NonInteractiveUserSignInLogs"
        enabled  = true
        retention_policy {
            enabled = true
            days    = 1
        }
    }
    log {
        category = "ServicePrincipalSignInLogs"
        enabled  = true
        retention_policy {
            enabled = true
            days    = 1
        }
    }
    log {
        category = "ManagedIdentitySignInLogs"
        enabled  = true
        retention_policy {
            enabled = true
            days    = 1
        }
    }
    log {
        category = "RiskyUsers"
        enabled  = false
        retention_policy {
            days    = 0
            enabled = false
        }
    }
    log {
        category = "UserRiskEvents"
        enabled  = false
        retention_policy {
            days    = 0
            enabled = false
        }
    }
    log {
        category = "ServicePrincipalRiskEvents"
        enabled  = false

        retention_policy {
            days    = 0
            enabled = false
        }
    }
}

Others have said that they can 'silence' these by adding the logs to their configuration file. So using the ServicePrincipalRiskEvents change above as an example, I added this to my config only to get this error:

PS C:\Terraform\deploy-avd> terraform plan -out="latest.tfplan"
╷
│ Error: expected log.4.category to be one of [AuditLogs SignInLogs ADFSSignInLogs ManagedIdentitySignInLogs NonInteractiveUserSignInLogs ProvisioningLogs ServicePrincipalSignInLogs RiskyUsers UserRiskEvents], got ServicePrincipalRiskEvents
│
│   with azurerm_monitor_aad_diagnostic_setting.aad_logging,
│   on main.tf line 348, in resource "azurerm_monitor_aad_diagnostic_setting" "aad_logging":
│  348: resource "azurerm_monitor_aad_diagnostic_setting" "aad_logging" {

So every time I create a plan, it tries to modify a number of logs that do not even exist for this resource type. And when I try to silence the 'change' by manually adding the log, it tells me the category does not exist.

teowa commented 2 years ago

Hi @arbitmcdonald, thanks for submitting this. The error from azurerm_monitor_aad_diagnostic_setting seems to be related to issue #15416, and the PR to fix it was released in version 3.4.0. Maybe you can update the azurerm provider version to suppress this error.
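For anyone applying that suggestion, a minimal sketch of the provider version constraint (the exact minimum version you need may differ):

terraform {
  required_providers {
    azurerm = {
      source = "hashicorp/azurerm"
      # Assumption: 3.4.0 is the release referred to above; pin to whichever
      # version actually contains the fix you need.
      version = ">= 3.4.0"
    }
  }
}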

KuznecovSemen commented 1 year ago

We have the same problem. When we change the retention_policy argument (change the value of "enabled" or change the value of "days") and run "plan", we get a message that no changes were made, and "apply" does not work.

Terraform version 1.1.7, AzureRM Provider version 3.35.0

Module:

resource "azurerm_monitor_diagnostic_setting" "laworkspace" {
  count              = var.diagnostic_setting == null ? 0 : 1
  name               = var.diagnostic_setting.diagnostic_setting_name
  target_resource_id = azurerm_log_analytics_workspace.laworkspace.id
  storage_account_id = var.storage_account_id

dynamic "log" {
    for_each = { for logs in var.diagnostic_setting.log : logs.category_group => logs }
    content {
      category_group = log.value.category_group
      enabled        = log.value.enabled
      retention_policy {
        enabled = log.value.retention_policy.enabled
        days    = log.value.retention_policy.enabled == true ? try(log.value.retention_policy.days, null) : null
      }
    }
  }

   dynamic "metric" {
    for_each = { for metrics in var.diagnostic_setting.metric : metrics.category => metrics }
    content {
      category = metric.value.category
      enabled  = metric.value.enabled
      retention_policy {
        enabled = metric.value.retention_policy.enabled
        days    = metric.value.retention_policy.enabled == true ? try(metric.value.retention_policy.days, null) : null
      }
    }
  }
}

Config

diagnostic_setting = {
  diagnostic_setting_name = "la-noeu-p-centralShared-01-dgs"
  log = [
    {
      category_group = "audit"
      enabled        = true
      retention_policy = {
        enabled = true
        days    = 7
      }
    }
  ]
  metric = [
    {
      category = "AllMetrics"
      enabled  = true
      retention_policy = {
        enabled = true
      }
    }
  ]
}
cn3mo commented 1 year ago
modules  > 2023-01-26-10:00:05 >
❯  grep 'azurerm.*version' providers.tf 

    azurerm     = { source = "hashicorp/azurerm", version = "=3.40.0" }

modules > 2023-01-26-10:00:10 >
❯  grep 'azurerm_monitor_diagnostic' -A 15  main.tf

data "azurerm_monitor_diagnostic_categories" "mssql" {
  resource_id = resource.azurerm_mssql_database.main.id
}

resource "azurerm_monitor_diagnostic_setting" "mssql" {
  name                       = "mssql"
  target_resource_id         = resource.azurerm_mssql_database.main.id
  storage_account_id         = var.storage_account.id
  log_analytics_workspace_id = "/subscriptions/${var.subscription_id}/resourceGroups/${var.resourcegroup}/providers/Microsoft.OperationalInsights/workspaces/${var.workspaces}"

  dynamic "metric" {
    iterator = metric
    for_each = data.azurerm_monitor_diagnostic_categories.mssql.metrics

    content {
      category = metric.value
      retention_policy {
        days    = 7
        enabled = true
      }
    }
  }
  dynamic "enabled_log" {
    iterator = log_category_type
    for_each = data.azurerm_monitor_diagnostic_categories.mssql.log_category_types

    content {
      category = log_category_type.value
      retention_policy {
        days    = 7
        enabled = true
      }
    }
  }
}
#
sheeeng commented 1 year ago

Reproducible with azurerm v3.33.0.

Terraform used the selected providers to generate the following execution
plan. Resource actions are indicated with the following symbols:
  ~ update in-place

Terraform will perform the following actions:

  # azurerm_monitor_diagnostic_setting.monitor_diagnostic_setting will be updated in-place
  ~ resource "azurerm_monitor_diagnostic_setting" "monitor_diagnostic_setting" {
        id                             = "/subscriptions/***"
      + log_analytics_destination_type = "Dedicated"
        name                           = "DiagnoseAllLogsAllMetrics"
        # (2 unchanged attributes hidden)

      - log {
          - category_group = "allLogs" -> null
          - enabled        = true -> null

          - retention_policy {
              - days    = 30 -> null
              - enabled = true -> null
            }
        }
      - log {
          - category_group = "audit" -> null
          - enabled        = false -> null

          - retention_policy {
              - days    = 0 -> null
              - enabled = false -> null
            }
        }
      + log {
          + category_group = "allLogs"
          + enabled        = true

          + retention_policy {
              + days    = 30
              + enabled = true
            }
        }

        # (1 unchanged block hidden)
    }

Plan: 0 to add, 1 to change, 0 to destroy.
ptpu commented 1 year ago

I've done it exactly like @adhodgson1 explained above.

It is working for the enabled_logs block. There are no more diffs displayed in the plan. But I'm still running into an issue with the metrics block.

        name                       = "Webapp-Diagnostics"
        # (3 unchanged attributes hidden)

      - metric {
          - category = "AllMetrics" -> null
          - enabled  = true -> null

          - retention_policy {
              - days    = 90 -> null
              - enabled = true -> null
            }
        }
      + metric {
          + category = (known after apply)
          + enabled  = (known after apply)

          + retention_policy {
              + days    = (known after apply)
              + enabled = (known after apply)
            }
        }

        # (16 unchanged blocks hidden)
    }
websolut commented 5 months ago

I am having the same issue - even if I configure all the metrics as disabled with tf:

resource "azurerm_monitor_diagnostic_setting" "storageaccount_diagnostic_setting" {

  count = var.storagacc_enable_monitoring ? 1 : 0

  name               = "${var.storagacc_name}-default-diagnostic-setting"
  target_resource_id = "${local.storagacc_id}/blobServices/default/"
  storage_account_id = var.storagacc_audit_account_id

  dynamic "enabled_log" {
    iterator = log_category
    for_each = local.storagacc_diagnostic_categories_enabled

    content {
      category = log_category.value
    }
  }

  dynamic "metric" {
    iterator = metric_category
    for_each = data.azurerm_monitor_diagnostic_categories.storagacc_diagnostic_categories.metrics

    content {
      enabled  = contains(local.storagacc_diagnostic_categories_enabled, metric_category.value) ? true : false
      category = metric_category.value
    }
  }

}

TF thinks that there is drift:

# module.storage_account_gaming_legalhold_1.azurerm_monitor_diagnostic_setting.storageaccount_diagnostic_setting[0] will be updated in-place
  ~ resource "azurerm_monitor_diagnostic_setting" "storageaccount_diagnostic_setting" {
        id                 = ""
        name               = default-diagnostic-setting"
        # (2 unchanged attributes hidden)

      - metric {
          - category = "Capacity" -> null
          - enabled  = false -> null

          - retention_policy {
              - days    = 0 -> null
              - enabled = false -> null
            }
        }
      - metric {
          - category = "Transaction" -> null
          - enabled  = false -> null

          - retention_policy {
              - days    = 0 -> null
              - enabled = false -> null
            }
        }

        # (5 unchanged blocks hidden)
    }
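Following the earlier workarounds in this thread, one thing that may remove that particular diff (a sketch only, relevant on provider versions that still expose the retention_policy block) is to declare retention_policy explicitly with the defaults the API reports back:

  dynamic "metric" {
    iterator = metric_category
    for_each = data.azurerm_monitor_diagnostic_categories.storagacc_diagnostic_categories.metrics

    content {
      category = metric_category.value
      enabled  = contains(local.storagacc_diagnostic_categories_enabled, metric_category.value)

      # Assumption: matching the API-reported defaults (days = 0, enabled = false)
      # so the refreshed state and the configuration agree.
      retention_policy {
        days    = 0
        enabled = false
      }
    }
  }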